diff --git a/docs/workflows.rst b/docs/workflows.rst index 0088e38af..ffdc39c19 100644 --- a/docs/workflows.rst +++ b/docs/workflows.rst @@ -47,7 +47,7 @@ then fsnative surface files from the preprocessing derivatives will be warped to Identification of high-motion outlier volumes ============================================= :func:`~xcp_d.workflows.postprocessing.init_prepare_confounds_wf`, -:class:`~xcp_d.interfaces.censoring.GenerateConfounds` +:class:`~xcp_d.interfaces.censoring.GenerateTemporalMask` XCP-D uses framewise displacement to identify high-motion outlier volumes. These outlier volumes are removed from the BOLD data prior to denoising. @@ -62,7 +62,7 @@ The threshold used to identify outlier volumes can be set with the ``--fd-thresh Motion parameter filtering [OPTIONAL] ------------------------------------- :func:`~xcp_d.workflows.postprocessing.init_prepare_confounds_wf`, -:class:`~xcp_d.interfaces.censoring.GenerateConfounds`, +:class:`~xcp_d.interfaces.censoring.ModifyConfounds`, :func:`~xcp_d.utils.confounds.load_motion` Motion parameters may be contaminated with respiratory effects :footcite:p:`power2019distinctions`. @@ -116,7 +116,7 @@ per :footcite:t:`gratton2020removal`. Framewise displacement calculation and thresholding --------------------------------------------------- :func:`~xcp_d.workflows.postprocessing.init_prepare_confounds_wf`, -:class:`~xcp_d.interfaces.censoring.GenerateConfounds`, +:class:`~xcp_d.interfaces.censoring.GenerateTemporalMask`, :func:`~xcp_d.utils.modified_data.compute_fd` Framewise displacement is then calculated according to the formula from :footcite:t:`power_fd_dvars`. @@ -137,7 +137,7 @@ These volumes will later be removed from the denoised data. Confound regressor selection ============================ :func:`~xcp_d.workflows.postprocessing.init_prepare_confounds_wf`, -:func:`~xcp_d.interfaces.censoring.GenerateConfounds` +:func:`~xcp_d.interfaces.censoring.GenerateDesignMatrix` The confound regressor configurations in the table below are implemented in XCP-D, with ``36P`` as the default. @@ -507,13 +507,13 @@ ReHo :func:`~xcp_d.workflows.restingstate.init_reho_nifti_wf`, :func:`~xcp_d.workflows.restingstate.init_reho_cifti_wf` -Regional Homogeneity (ReHo) is a measure of local temporal uniformity in the BOLD signal computed at each voxel of the processed image. -Greater ReHo values correspond to greater synchrony among BOLD activity patterns measured in a local neighborhood of voxels, with neighborhood size determined by a user-specified radius of voxels. +Regional Homogeneity (ReHo) is a measure of local temporal uniformity in the BOLD signal computed at each voxel of the processed image. +Greater ReHo values correspond to greater synchrony among BOLD activity patterns measured in a local neighborhood of voxels, with neighborhood size determined by a user-specified radius of voxels. ReHo is calculated as the coefficient of concordance among all voxels in a sphere centered on the target voxel. -For NIfTIs, ReHo is always calculated via AFNI’s 3dReho with 27 voxels in each neighborhood, using Kendall's coefficient of concordance (KCC). -For CIFTIs, the left and right hemisphere are extracted into GIFTI format via Connectome Workbench’s CIFTISeparateMetric. Next, the mesh adjacency matrix is obtained,and Kendall's coefficient of concordance (KCC) is calculated, with each vertex having four neighbors. -For subcortical voxels in the CIFTIs, 3dReho is used with the same parameters that are used for NIfTIs. 
+For NIfTIs, ReHo is always calculated via AFNI’s 3dReHo with 27 voxels in each neighborhood, using Kendall's coefficient of concordance (KCC). +For CIFTIs, the left and right hemispheres are extracted into GIFTI format via Connectome Workbench’s CIFTISeparateMetric. Next, the mesh adjacency matrix is obtained, and Kendall's coefficient of concordance (KCC) is calculated, with each vertex having four neighbors. +For subcortical voxels in the CIFTIs, 3dReHo is used with the same parameters that are used for NIfTIs. Parcellation and functional connectivity estimation [OPTIONAL] diff --git a/xcp_d/cli/parser.py b/xcp_d/cli/parser.py index 73a522901..43d4af30c 100644 --- a/xcp_d/cli/parser.py +++ b/xcp_d/cli/parser.py @@ -8,6 +8,7 @@ import os import sys +import xcp_d.cli.parser_utils as types from xcp_d import config @@ -19,7 +20,6 @@ def _build_parser(): from packaging.version import Version - from xcp_d.cli.parser_utils import _float_or_auto, _int_or_auto, _restricted_float from xcp_d.cli.version import check_latest, is_flagged from xcp_d.utils.atlas import select_atlases @@ -257,7 +257,7 @@ def _bids_filter(value, parser): "--dummy_scans", dest="dummy_scans", default=0, - type=_int_or_auto, + type=types._int_or_auto, metavar="{{auto,INT}}", help=( "Number of volumes to remove from the beginning of each run. " @@ -399,7 +399,7 @@ def _bids_filter(value, parser): "--head-radius", "--head_radius", default=50, - type=_float_or_auto, + type=types._float_or_auto, help=( "Head radius used to calculate framewise displacement, in mm. " "The default value is 50 mm, which is recommended for adults. " @@ -412,15 +412,67 @@ def _bids_filter(value, parser): "-f", "--fd-thresh", "--fd_thresh", - default=0.3, + default=[0.3, 0], + metavar="FLOAT", type=float, + nargs="+", help=( - "Framewise displacement threshold for censoring. " - "Any volumes with an FD value greater than the threshold will be removed from the " - "denoised BOLD data. " + "Framewise displacement thresholds for censoring. " + "This may be a single value or a pair of values. " + "If two values are provided, the first one will determine which volumes are " + "removed from the denoised BOLD data, and the second one will determine which " + "volumes are removed from the interpolated BOLD data. " "A threshold of <=0 will disable censoring completely." ), ) + g_censor.add_argument( + "--dvars-thresh", + "--dvars_thresh", + default=[0, 0], + metavar="FLOAT", + type=float, + nargs="+", + help=( + "DVARS thresholds for censoring. " + "This may be a single value or a pair of values. " + "If two values are provided, the first one will determine which volumes are " + "removed from the denoised BOLD data, and the second one will determine which " + "volumes are removed from the interpolated BOLD data. " + "A threshold of <=0 will disable censoring completely." + ), + ) + g_censor.add_argument( + "--censor-before", + "--censor_before", + default=[0, 0], + metavar="INT", + nargs="+", + type=int, + help="The number of volumes to remove before any outlier volumes.", + ) + g_censor.add_argument( + "--censor-after", + "--censor_after", + default=[0, 0], + metavar="INT", + nargs="+", + type=int, + help="The number of volumes to remove after any outlier volumes.", + ) + g_censor.add_argument( + "--censor-between", + "--censor_between", + default=[0, 0], + metavar="INT", + nargs="+", + type=int, + help=( + "If any short sets of contiguous non-outliers are found between outliers, " + "this parameter will remove them. 
" + "For example, if the value is set to 1, then any cases where only one non-outlier " + "volume exists between two outlier volumes will be censored." + ), + ) g_censor.add_argument( "--min-time", "--min_time", @@ -519,7 +571,7 @@ def _bids_filter(value, parser): "--min_coverage", required=False, default=0.5, - type=_restricted_float, + type=types._restricted_float, help=( "Coverage threshold to apply to parcels in each atlas. " "Any parcels with lower coverage than the threshold will be replaced with NaNs. " @@ -896,10 +948,23 @@ def _validate_parameters(opts, build_log, parser): build_log.warning("Bandpass filtering is disabled. ALFF outputs will not be generated.") # Scrubbing parameters - if opts.fd_thresh <= 0 and opts.min_time > 0: + opts.fd_thresh = types._check_censoring_thresholds(opts.fd_thresh, parser, "--fd-thresh") + opts.dvars_thresh = types._check_censoring_thresholds( + opts.dvars_thresh, parser, "--dvars-thresh" + ) + opts.censor_before = types._check_censoring_numbers( + opts.censor_before, parser, "--censor-before" + ) + opts.censor_after = types._check_censoring_numbers(opts.censor_after, parser, "--censor-after") + opts.censor_between = types._check_censoring_numbers( + opts.censor_between, parser, "--censor-between" + ) + + nocensor = all(t <= 0 for t in opts.fd_thresh + opts.dvars_thresh) + if nocensor and opts.min_time > 0: ignored_params = "\n\t".join(["--min-time"]) build_log.warning( - "Framewise displacement-based scrubbing is disabled. " + "Censoring is disabled. " f"The following parameters will have no effect:\n\t{ignored_params}" ) opts.min_time = 0 diff --git a/xcp_d/cli/parser_utils.py b/xcp_d/cli/parser_utils.py index a30fdacfd..d6719ba7d 100644 --- a/xcp_d/cli/parser_utils.py +++ b/xcp_d/cli/parser_utils.py @@ -73,6 +73,52 @@ def _float_or_auto(string, is_parser=True): return floatarg +def _one_or_two_ints(string, is_parser=True): + """Check if argument is one or two integers >= 0.""" + error = argparse.ArgumentTypeError if is_parser else ValueError + try: + ints = [int(i) for i in string.split()] + except ValueError: + msg = "Argument must be one or two integers." + raise error(msg) + + if len(ints) == 1: + ints.append(0) + elif len(ints) != 2: + raise error("Argument must be one or two integers.") + + if ints[0] < 0 or ints[1] < 0: + raise error( + "Int arguments must be nonnegative. " + "If you wish to disable censoring, set the value to 0." + ) + + return ints + + +def _one_or_two_floats(string, is_parser=True): + """Check if argument is one or two floats >= 0.""" + error = argparse.ArgumentTypeError if is_parser else ValueError + try: + floats = [float(i) for i in string.split()] + except ValueError: + msg = "Argument must be one or two floats." + raise error(msg) + + if len(floats) == 1: + floats.append(0.0) + elif len(floats) != 2: + raise error("Argument must be one or two floats.") + + if floats[0] < 0 or floats[1] < 0: + raise error( + "Float arguments must be nonnegative. " + "If you wish to disable censoring, set the value to 0." + ) + + return floats + + def _restricted_float(x): """From https://stackoverflow.com/a/12117065/2589328.""" try: @@ -101,3 +147,32 @@ def __call__(self, parser, namespace, values, option_string=None): # noqa: U100 f"{self.__version__}. " ) setattr(namespace, self.dest, values) + + +def _check_censoring_thresholds(values, parser, arg_name): + if len(values) == 1: + values = [values[0], values[0]] + elif len(values) > 2: + parser.error( + f"Invalid number of values for '{arg_name}': {len(values)}. 
" + "Please provide either one or two values." + ) + + if values[0] > 0 and values[1] > values[0]: + parser.error( + f"The second value ({values[1]}) for '{arg_name}' must be less than or equal to the " + f"first value ({values[0]})." + ) + + return values + + +def _check_censoring_numbers(values, parser, arg_name): + if len(values) == 1: + values = [values[0], 0] + elif len(values) > 2: + parser.error( + f"Invalid number of values for '{arg_name}': {len(values)}. " + "Please provide either one or two values." + ) + return values diff --git a/xcp_d/config.py b/xcp_d/config.py index f3fe05a38..dff2aa275 100644 --- a/xcp_d/config.py +++ b/xcp_d/config.py @@ -548,6 +548,14 @@ class workflow(_Config): """Radius of the head in mm.""" fd_thresh = None """Framewise displacement threshold for censoring.""" + dvars_thresh = None + """DVARS threshold for censoring.""" + censor_before = None + """Number of volumes to remove before FD or DVARS outliers.""" + censor_after = None + """Number of volumes to remove after FD or DVARS outliers.""" + censor_between = None + """Number of volumes to remove between any FD or DVARS outliers.""" min_time = None """Post-scrubbing threshold to apply to individual runs in the dataset.""" bandpass_filter = True diff --git a/xcp_d/data/xcp_d_bids_config.json b/xcp_d/data/xcp_d_bids_config.json index 80039531e..fa8288f30 100755 --- a/xcp_d/data/xcp_d_bids_config.json +++ b/xcp_d/data/xcp_d_bids_config.json @@ -37,7 +37,7 @@ "sub-{subject}[/ses-{session}]/{datatype|func}/sub-{subject}[_ses-{session}][_task-{task}][_acq-{acquisition}][_ce-{ceagent}][_dir-{direction}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_space-{space}][_cohort-{cohort}][_seg-{segmentation}][_res-{res}][_den-{den}]_stat-{statistic}[_desc-{desc}]_{suffix}{extension<.tsv|.json>|.tsv}", "sub-{subject}[/ses-{session}]/{datatype|func}/sub-{subject}[_ses-{session}]_task-{task}[_acq-{acquisition}][_ce-{ceagent}][_dir-{direction}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_desc-{desc}]_{suffix|design}{extension<.tsv|.json>|.tsv}", "sub-{subject}[/ses-{session}]/{datatype|func}/sub-{subject}[_ses-{session}]_task-{task}[_acq-{acquisition}][_ce-{ceagent}][_dir-{direction}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_space-{space}][_cohort-{cohort}][_seg-{segmentation}][_res-{res}][_den-{den}]_stat-{statistic}[_desc-{desc}]_{suffix}{extension<.tsv|.json>|.tsv}", - "sub-{subject}[/ses-{session}]/{datatype|func}/sub-{subject}[_ses-{session}]_task-{task}[_acq-{acquisition}][_ce-{ceagent}][_dir-{direction}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_desc-{desc}]_{suffix}{extension<.json|.tsv>|.tsv}", + "sub-{subject}[/ses-{session}]/{datatype|func}/sub-{subject}[_ses-{session}]_task-{task}[_acq-{acquisition}][_ce-{ceagent}][_dir-{direction}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_desc-{desc}]_{suffix}{extension<.json|.tsv>|.tsv}", "sub-{subject}[/ses-{session}]/{datatype|func}/sub-{subject}[_ses-{session}]_task-{task}[_acq-{acquisition}][_ce-{ceagent}][_dir-{direction}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_space-{space}][_cohort-{cohort}][_res-{res}][_den-{den}][_desc-{desc}]_{suffix}{extension<.tsv|.json>|.tsv}", "sub-{subject}[/ses-{session}]/{datatype|func}/sub-{subject}[_ses-{session}]_task-{task}[_acq-{acquisition}][_ce-{ceagent}][_dir-{direction}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_desc-{desc}]_{suffix}{extension<.hdf5>|.hdf5}", 
"sub-{subject}[/ses-{session}]/{datatype|func}/sub-{subject}[_ses-{session}]_task-{task}[_acq-{acquisition}][_ce-{ceagent}][_dir-{direction}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_space-{space}][_cohort-{cohort}][_den-{den}][_hemi-{hemi}][_desc-{desc}]_{suffix}{extension<.dtseries.nii|.json>}", @@ -47,7 +47,7 @@ "sub-{subject}[/ses-{session}]/{datatype|func}/sub-{subject}[_ses-{session}]_task-{task}[_acq-{acquisition}][_ce-{ceagent}][_dir-{direction}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_space-{space}][_cohort-{cohort}]_seg-{segmentation}[_den-{den}][_hemi-{hemi}]_stat-{statistic}[_desc-{desc}]_{suffix}{extension<.pconn.nii|.json>}", "sub-{subject}/{datatype}/sub-{subject}[_ses-{session}][_acq-{acquisition}][_ce-{ceagent}][_rec-{reconstruction}][_run-{run}][_space-{space}][_cohort-{cohort}][_seg-{segmentation}][_desc-{desc}]_{suffix}{extension<.html|.svg|.png>}", "sub-{subject}/{datatype}/sub-{subject}[_ses-{session}][_acq-{acquisition}][_ce-{ceagent}][_rec-{reconstruction}][_run-{run}][_space-{space}][_cohort-{cohort}][_seg-{segmentation}][_desc-{desc}]_{suffix}{extension<.html|.svg|.png>}", - "sub-{subject}/{datatype}/sub-{subject}[_ses-{session}]_task-{task}[_acq-{acquisition}][_ce-{ceagent}][_dir-{direction}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_space-{space}][_cohort-{cohort}][_desc-{desc}]_{suffix}{extension<.html|.svg|.png>}", + "sub-{subject}/{datatype}/sub-{subject}[_ses-{session}]_task-{task}[_acq-{acquisition}][_ce-{ceagent}][_dir-{direction}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_space-{space}][_cohort-{cohort}][_desc-{desc}]_{suffix}{extension<.html|.svg|.png>}", "sub-{subject}/{datatype}/sub-{subject}[_ses-{session}]_task-{task}[_acq-{acquisition}][_ce-{ceagent}][_dir-{direction}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_desc-{desc}]_{suffix|design}{extension<.html|.svg|.png>}" ] } diff --git a/xcp_d/interfaces/censoring.py b/xcp_d/interfaces/censoring.py index 42fcd1338..20a6924fd 100644 --- a/xcp_d/interfaces/censoring.py +++ b/xcp_d/interfaces/censoring.py @@ -1,5 +1,6 @@ """Interfaces for the post-processing workflows.""" +import json import os import nibabel as nb @@ -17,8 +18,9 @@ from xcp_d.utils.confounds import ( _infer_dummy_scans, _modify_motion_filter, + calculate_outliers, + filter_motion, load_confound_matrix, - load_motion, ) from xcp_d.utils.filemanip import fname_presuffix from xcp_d.utils.modified_data import _drop_dummy_scans, compute_fd @@ -28,7 +30,6 @@ class _RemoveDummyVolumesInputSpec(BaseInterfaceInputSpec): - bold_file = File(exists=True, mandatory=True, desc="Either cifti or nifti ") dummy_scans = traits.Either( traits.Int, "auto", @@ -38,7 +39,8 @@ class _RemoveDummyVolumesInputSpec(BaseInterfaceInputSpec): "calculated in an earlier workflow from dummy_scans." ), ) - confounds_file = traits.Either( + bold_file = File(exists=True, mandatory=True, desc="Either cifti or nifti ") + design_matrix = traits.Either( File(exists=True), None, mandatory=True, @@ -47,12 +49,12 @@ class _RemoveDummyVolumesInputSpec(BaseInterfaceInputSpec): "May be None if denoising is disabled." ), ) - fmriprep_confounds_file = File( + full_confounds = File( exists=True, mandatory=True, desc="fMRIPrep confounds tsv. 
Used for motion-based censoring.", ) - motion_file = File( + modified_full_confounds = File( exists=True, mandatory=True, desc="Confounds file containing only filtered motion parameters.", @@ -65,12 +67,12 @@ class _RemoveDummyVolumesInputSpec(BaseInterfaceInputSpec): class _RemoveDummyVolumesOutputSpec(TraitedSpec): - confounds_file_dropped_TR = traits.Either( + design_matrix_dropped_TR = traits.Either( File(exists=True), None, desc="TSV file with selected confounds for denoising, after removing TRs.", ) - fmriprep_confounds_file_dropped_TR = File( + full_confounds_dropped_TR = File( exists=True, desc="fMRIPrep confounds tsv after removing TRs. Used for motion-based censoring.", ) @@ -79,7 +81,7 @@ class _RemoveDummyVolumesOutputSpec(TraitedSpec): desc="bold or cifti with volumes dropped", ) dummy_scans = traits.Int(desc="Number of volumes dropped.") - motion_file_dropped_TR = File( + modified_full_confounds_dropped_TR = File( exists=True, desc="Confounds file containing only filtered motion parameters.", ) @@ -102,7 +104,7 @@ class RemoveDummyVolumes(SimpleInterface): def _run_interface(self, runtime): dummy_scans = _infer_dummy_scans( dummy_scans=self.inputs.dummy_scans, - confounds_file=self.inputs.fmriprep_confounds_file, + confounds_file=self.inputs.full_confounds, ) self._results["dummy_scans"] = dummy_scans @@ -111,12 +113,12 @@ def _run_interface(self, runtime): if dummy_scans == 0: # write the output out self._results["bold_file_dropped_TR"] = self.inputs.bold_file - self._results["fmriprep_confounds_file_dropped_TR"] = ( - self.inputs.fmriprep_confounds_file + self._results["full_confounds_dropped_TR"] = self.inputs.full_confounds + self._results["modified_full_confounds_dropped_TR"] = ( + self.inputs.modified_full_confounds ) - self._results["confounds_file_dropped_TR"] = self.inputs.confounds_file - self._results["motion_file_dropped_TR"] = self.inputs.motion_file self._results["temporal_mask_dropped_TR"] = self.inputs.temporal_mask + self._results["design_matrix_dropped_TR"] = self.inputs.design_matrix return runtime # get the file names to output to @@ -126,13 +128,13 @@ def _run_interface(self, runtime): suffix="_dropped", use_ext=True, ) - self._results["fmriprep_confounds_file_dropped_TR"] = fname_presuffix( - self.inputs.fmriprep_confounds_file, + self._results["full_confounds_dropped_TR"] = fname_presuffix( + self.inputs.full_confounds, newpath=runtime.cwd, - suffix="_fmriprep_dropped", + suffix="_dropped", use_ext=True, ) - self._results["motion_file_dropped_TR"] = fname_presuffix( + self._results["modified_full_confounds_dropped_TR"] = fname_presuffix( self.inputs.bold_file, suffix="_motion_dropped.tsv", newpath=os.getcwd(), @@ -150,36 +152,38 @@ def _run_interface(self, runtime): dropped_image.to_filename(self._results["bold_file_dropped_TR"]) # Drop the first N rows from the pandas dataframe - fmriprep_confounds_df = pd.read_table(self.inputs.fmriprep_confounds_file) - fmriprep_confounds_df_dropped = fmriprep_confounds_df.drop(np.arange(dummy_scans)) - fmriprep_confounds_df_dropped.to_csv( - self._results["fmriprep_confounds_file_dropped_TR"], + full_confounds_df = pd.read_table(self.inputs.full_confounds) + full_confounds_df_dropped = full_confounds_df.drop(np.arange(dummy_scans)) + full_confounds_df_dropped.to_csv( + self._results["full_confounds_dropped_TR"], sep="\t", index=False, ) # Drop the first N rows from the confounds file - self._results["confounds_file_dropped_TR"] = None - if self.inputs.confounds_file: - self._results["confounds_file_dropped_TR"] = 
fname_presuffix( + self._results["design_matrix_dropped_TR"] = None + if self.inputs.design_matrix: + self._results["design_matrix_dropped_TR"] = fname_presuffix( self.inputs.bold_file, suffix="_selected_confounds_dropped.tsv", newpath=os.getcwd(), use_ext=False, ) - confounds_df = pd.read_table(self.inputs.confounds_file) + confounds_df = pd.read_table(self.inputs.design_matrix) confounds_df_dropped = confounds_df.drop(np.arange(dummy_scans)) confounds_df_dropped.to_csv( - self._results["confounds_file_dropped_TR"], + self._results["design_matrix_dropped_TR"], sep="\t", index=False, ) # Drop the first N rows from the motion file - motion_df = pd.read_table(self.inputs.motion_file) - motion_df_dropped = motion_df.drop(np.arange(dummy_scans)) - motion_df_dropped.to_csv( - self._results["motion_file_dropped_TR"], + modified_full_confounds_df = pd.read_table(self.inputs.modified_full_confounds) + modified_full_confounds_df_dropped = modified_full_confounds_df.drop( + np.arange(dummy_scans) + ) + modified_full_confounds_df_dropped.to_csv( + self._results["modified_full_confounds_dropped_TR"], sep="\t", index=False, ) @@ -228,7 +232,7 @@ class Censor(SimpleInterface): def _run_interface(self, runtime): # Read in temporal mask censoring_df = pd.read_table(self.inputs.temporal_mask) - motion_outliers = censoring_df["framewise_displacement"].to_numpy() + motion_outliers = censoring_df["interpolation"].to_numpy() if np.sum(motion_outliers) == 0: # No censoring needed self._results["censored_denoised_bold"] = self.inputs.in_file @@ -338,7 +342,7 @@ def _run_interface(self, runtime): use_ext=True, ) rng = np.random.default_rng(self.inputs.random_seed) - low_motion_idx = censoring_df.loc[censoring_df["framewise_displacement"] != 1].index.values + low_motion_idx = censoring_df.loc[censoring_df["interpolation"] != 1].index.values for exact_scan in self.inputs.exact_scans: random_censor = rng.choice(low_motion_idx, size=exact_scan, replace=False) column_name = f"exact_{exact_scan}" @@ -362,36 +366,19 @@ def _run_interface(self, runtime): return runtime -class _GenerateConfoundsInputSpec(BaseInterfaceInputSpec): - in_file = File( - exists=True, - mandatory=True, - desc="BOLD file after denoising, interpolation, and filtering", - ) - params = traits.Str(mandatory=True, desc="Parameter set for regression.") - TR = traits.Float(mandatory=True, desc="Repetition time in seconds") - fd_thresh = traits.Float( - mandatory=False, - default_value=0.3, - desc="Framewise displacement threshold. 
All values above this will be dropped.", - ) +class _ModifyConfoundsInputSpec(BaseInterfaceInputSpec): head_radius = traits.Float(mandatory=False, default_value=50, desc="Head radius in mm ") - fmriprep_confounds_file = File( + TR = traits.Float(mandatory=True, desc="Repetition time in seconds") + full_confounds = File( exists=True, mandatory=True, desc="fMRIPrep confounds tsv.", ) - fmriprep_confounds_json = File( + full_confounds_json = File( exists=True, mandatory=True, desc="fMRIPrep confounds json.", ) - custom_confounds_file = traits.Either( - None, - File(exists=True), - mandatory=True, - desc="Custom confounds tsv.", - ) motion_filter_type = traits.Either( None, traits.Str, @@ -416,48 +403,29 @@ class _GenerateConfoundsInputSpec(BaseInterfaceInputSpec): ) -class _GenerateConfoundsOutputSpec(TraitedSpec): - filtered_confounds_file = File( +class _ModifyConfoundsOutputSpec(TraitedSpec): + modified_full_confounds = File( exists=True, - desc=( - "The original fMRIPrep confounds, with the motion parameters and their Volterra " - "expansion regressors replaced with filtered versions." - ), - ) - confounds_file = traits.Either( - File(exists=True), - None, - desc="The selected confounds. This may include custom confounds as well.", + desc="Modified confounds file.", ) - confounds_metadata = traits.Dict(desc="Metadata associated with the confounds_file output.") - motion_file = File( + modified_full_confounds_metadata = traits.Dict( exists=True, - desc="The filtered motion parameters.", - ) - motion_metadata = traits.Dict(desc="Metadata associated with the filtered_motion output.") - temporal_mask = File( - exists=True, - desc=( - "Temporal mask; all values above fd_thresh set to 1. " - "This is a TSV file with one column: 'framewise_displacement'." - ), - ) - temporal_mask_metadata = traits.Dict( - desc="Metadata associated with the temporal_mask output.", + desc="Metadata associated with the modified_full_confounds output.", ) -class GenerateConfounds(SimpleInterface): - """Load, consolidate, and filter confounds. +class ModifyConfounds(SimpleInterface): + """Apply motion filter to confounds and recalculate framewise displacement.""" - Also, generate the temporal mask. 
- """ - - input_spec = _GenerateConfoundsInputSpec - output_spec = _GenerateConfoundsOutputSpec + input_spec = _ModifyConfoundsInputSpec + output_spec = _ModifyConfoundsOutputSpec def _run_interface(self, runtime): - fmriprep_confounds_df = pd.read_table(self.inputs.fmriprep_confounds_file) + full_confounds_df = pd.read_table(self.inputs.full_confounds) + with open(self.inputs.full_confounds_json, "r") as f: + confounds_metadata = json.load(f) + + # Modify motion filter if requested parameters are below the Nyquist frequency band_stop_min_adjusted, band_stop_max_adjusted, _ = _modify_motion_filter( motion_filter_type=self.inputs.motion_filter_type, band_stop_min=self.inputs.band_stop_min, @@ -465,8 +433,9 @@ def _run_interface(self, runtime): TR=self.inputs.TR, ) - motion_df = load_motion( - fmriprep_confounds_df.copy(), + # Filter motion parameters + full_confounds_df = filter_motion( + confounds_df=full_confounds_df, TR=self.inputs.TR, motion_filter_type=self.inputs.motion_filter_type, motion_filter_order=self.inputs.motion_filter_order, @@ -474,52 +443,40 @@ def _run_interface(self, runtime): band_stop_max=band_stop_max_adjusted, ) - # Add in framewise displacement - fd_timeseries = compute_fd( - confound=motion_df, + # Calculate and add in framewise displacement + full_confounds_df["framewise_displacement"] = compute_fd( + confound=full_confounds_df, head_radius=self.inputs.head_radius, ) - motion_df["framewise_displacement"] = fd_timeseries - # A file to house the modified fMRIPrep confounds. - self._results["filtered_confounds_file"] = fname_presuffix( - self.inputs.fmriprep_confounds_file, + # Write out the modified fMRIPrep confounds. + self._results["modified_full_confounds"] = fname_presuffix( + self.inputs.full_confounds, suffix="_filtered", newpath=runtime.cwd, use_ext=True, ) - - # Replace original motion parameters with filtered versions - for col in motion_df.columns.tolist(): - # Check that the column already exists, in case fMRIPrep changes column names. - # We don't want to use the original motion parameters accidentally. - if col not in fmriprep_confounds_df.columns: - raise ValueError( - f"Column '{col}' not found in confounds " - f"{self.inputs.fmriprep_confounds_file}. " - "Please open an issue on GitHub." - ) - - fmriprep_confounds_df[col] = motion_df[col] - - fmriprep_confounds_df.to_csv( - self._results["filtered_confounds_file"], + full_confounds_df.to_csv( + self._results["modified_full_confounds"], sep="\t", index=False, ) - # Load nuisance regressors, but use filtered motion parameters. - confounds_df, confounds_metadata = load_confound_matrix( - params=self.inputs.params, - img_file=self.inputs.in_file, - confounds_file=self._results["filtered_confounds_file"], - confounds_json_file=self.inputs.fmriprep_confounds_json, - custom_confounds=self.inputs.custom_confounds_file, - ) + # Add filtering info to affected columns' metadata. + # This includes any columns that start with any of the following strings. 
+ motion_columns = [ + "trans_x", + "trans_y", + "trans_z", + "rot_x", + "rot_y", + "rot_z", + "framewise_displacement", + ] + for col in full_confounds_df.columns.tolist(): + if not any(col.startswith(mc) for mc in motion_columns): + continue - # Compile motion metadata from confounds metadata, adding in filtering info - motion_metadata = {} - for col in motion_df.columns.tolist(): col_metadata = confounds_metadata.get(col, {}) if col == "framewise_displacement": col_metadata["Description"] = ( @@ -551,44 +508,253 @@ def _run_interface(self, runtime): } col_metadata["SoftwareFilters"] = filters - motion_metadata[col] = col_metadata + confounds_metadata[col] = col_metadata + + self._results["modified_full_confounds_metadata"] = confounds_metadata + + return runtime + + +class _GenerateTemporalMaskInputSpec(BaseInterfaceInputSpec): + full_confounds = File( + exists=True, + mandatory=True, + desc="fMRIPrep confounds tsv.", + ) + fd_thresh = traits.List( + traits.Float, + minlen=2, + maxlen=2, + desc="Framewise displacement threshold. All values above this will be dropped.", + ) + dvars_thresh = traits.List( + traits.Float, + minlen=2, + maxlen=2, + desc="DVARS threshold. All values above this will be dropped.", + ) + censor_before = traits.List( + traits.Int, + minlen=2, + maxlen=2, + desc="Number of volumes to censor before each FD or DVARS outlier volume.", + ) + censor_after = traits.List( + traits.Int, + minlen=2, + maxlen=2, + desc="Number of volumes to censor after each FD or DVARS outlier volume.", + ) + censor_between = traits.List( + traits.Int, + minlen=2, + maxlen=2, + desc="Number of volumes to censor between each FD or DVARS outlier volume.", + ) + + +class _GenerateTemporalMaskOutputSpec(TraitedSpec): + temporal_mask = File( + exists=True, + desc=( + "Temporal mask; all values above fd_thresh set to 1. " + "This is a TSV file with one column: 'framewise_displacement'." + ), + ) + temporal_mask_metadata = traits.Dict( + desc="Metadata associated with the temporal_mask output.", + ) + + +class GenerateTemporalMask(SimpleInterface): + """Generate the temporal mask.""" + + input_spec = _GenerateTemporalMaskInputSpec + output_spec = _GenerateTemporalMaskOutputSpec + + def _run_interface(self, runtime): + full_confounds_df = pd.read_table(self.inputs.full_confounds) + + outliers_df = calculate_outliers( + confounds=full_confounds_df, + fd_thresh=self.inputs.fd_thresh, + dvars_thresh=self.inputs.dvars_thresh, + before=self.inputs.censor_before, + after=self.inputs.censor_after, + between=self.inputs.censor_between, + ) + + self._results["temporal_mask"] = fname_presuffix( + "outliers.tsv", + newpath=runtime.cwd, + use_ext=True, + ) + outliers_df = outliers_df.astype(int) + outliers_df.to_csv( + self._results["temporal_mask"], + index=False, + header=True, + sep="\t", + ) + + outliers_metadata = { + "framewise_displacement": { + "Description": "Outlier time series based on framewise displacement.", + "Levels": { + "0": "Non-outlier volume", + "1": "Outlier volume", + }, + "Threshold": self.inputs.fd_thresh[0], + }, + "dvars": { + "Description": "Outlier time series based on DVARS.", + "Levels": { + "0": "Non-outlier volume", + "1": "Outlier volume", + }, + "Threshold": self.inputs.dvars_thresh[0], + }, + "denoising": { + "Description": ( + "Outlier time series based on framewise displacement and DVARS. " + "Any volumes marked as outliers by either metric are marked as outliers in " + "this column. 
" + "This initial set of outliers is then expanded to mark " + f"{self.inputs.censor_before[0]} volumes before and " + f"{self.inputs.censor_after[0]} volumes after each outlier volume. " + "Finally, any sequences of non-outliers shorter than or equal to " + f"{self.inputs.censor_between[0]} volumes were marked as outliers." + ), + "Levels": { + "0": "Non-outlier volume", + "1": "Outlier volume", + }, + }, + "framewise_displacement_interpolation": { + "Description": "Outlier time series based on framewise displacement.", + "Levels": { + "0": "Non-outlier volume", + "1": "Outlier volume", + }, + "Threshold": self.inputs.fd_thresh[1], + }, + "dvars_interpolation": { + "Description": "Outlier time series based on DVARS.", + "Levels": { + "0": "Non-outlier volume", + "1": "Outlier volume", + }, + "Threshold": self.inputs.dvars_thresh[1], + }, + "interpolation": { + "Description": ( + "Outlier time series based on framewise displacement and DVARS. " + "Any volumes marked as outliers by either metric, or the 'denoising' column', " + "are marked as outliers in this column. " + "This initial set of outliers is then expanded to mark " + f"{self.inputs.censor_before[1]} volumes before and " + f"{self.inputs.censor_after[1]} volumes after each outlier volume. " + "Finally, any sequences of non-outliers shorter than or equal to " + f"{self.inputs.censor_between[1]} volumes were marked as outliers." + ), + "Levels": { + "0": "Non-outlier volume", + "1": "Outlier volume", + }, + }, + } + self._results["temporal_mask_metadata"] = outliers_metadata + + return runtime - self._results["motion_metadata"] = motion_metadata - if confounds_df is not None: - # Update confounds metadata with modified motion metadata - for col in motion_df.columns.tolist(): - if col in confounds_df.columns: - base_metadata = confounds_metadata.get(col, {}) - base_metadata.update(motion_metadata[col]) - confounds_metadata[col] = base_metadata +class _GenerateDesignMatrixInputSpec(BaseInterfaceInputSpec): + in_file = File( + exists=True, + mandatory=True, + desc="BOLD file after denoising, interpolation, and filtering", + ) + params = traits.Str(mandatory=True, desc="Parameter set for regression.") + full_confounds = File( + exists=True, + mandatory=True, + desc="fMRIPrep confounds tsv.", + ) + full_confounds_metadata = traits.Dict( + mandatory=True, + desc="fMRIPrep confounds metadata.", + ) + custom_confounds_file = traits.Either( + None, + File(exists=True), + mandatory=True, + desc="Custom confounds tsv.", + ) + +class _GenerateDesignMatrixOutputSpec(TraitedSpec): + design_matrix = traits.Either( + File(exists=True), + None, + desc="The selected confounds for denoising. This may include custom confounds as well.", + ) + design_matrix_metadata = traits.Dict(desc="Metadata associated with the design_matrix output.") + + +class GenerateDesignMatrix(SimpleInterface): + """Load, consolidate, and potentially orthogonalize confounds.""" + + input_spec = _GenerateDesignMatrixInputSpec + output_spec = _GenerateDesignMatrixOutputSpec + + def _run_interface(self, runtime): + full_confounds_json = fname_presuffix( + self.inputs.full_confounds, + suffix=".json", + newpath=runtime.cwd, + use_ext=False, + ) + with open(full_confounds_json, "w") as fo: + json.dump(self.inputs.full_confounds_metadata, fo, sort_keys=True, indent=4) + + # Load nuisance regressors, but use filtered motion parameters. 
+ design_matrix_df, design_matrix_metadata = load_confound_matrix( + params=self.inputs.params, + img_file=self.inputs.in_file, + confounds_file=self.inputs.full_confounds, + confounds_json_file=full_confounds_json, + custom_confounds=self.inputs.custom_confounds_file, + ) + + if design_matrix_df is not None: # Orthogonalize full nuisance regressors w.r.t. any signal regressors - signal_columns = [c for c in confounds_df.columns if c.startswith("signal__")] + signal_columns = [c for c in design_matrix_df.columns if c.startswith("signal__")] if signal_columns: LOGGER.warning( "Signal columns detected. " "Orthogonalizing nuisance columns w.r.t. the following signal columns: " f"{', '.join(signal_columns)}" ) - noise_columns = [c for c in confounds_df.columns if not c.startswith("signal__")] + noise_columns = [ + c for c in design_matrix_df.columns if not c.startswith("signal__") + ] orth_cols = [f"{c}_orth" for c in noise_columns] - orth_confounds_df = pd.DataFrame( - index=confounds_df.index, + orth_design_matrix_df = pd.DataFrame( + index=design_matrix_df.index, columns=orth_cols, ) # Do the orthogonalization - signal_regressors = confounds_df[signal_columns].to_numpy() - noise_regressors = confounds_df[noise_columns].to_numpy() + signal_regressors = design_matrix_df[signal_columns].to_numpy() + noise_regressors = design_matrix_df[noise_columns].to_numpy() signal_betas = np.linalg.lstsq(signal_regressors, noise_regressors, rcond=None)[0] pred_noise_regressors = np.dot(signal_regressors, signal_betas) orth_noise_regressors = noise_regressors - pred_noise_regressors # Replace the old data - orth_confounds_df.loc[:, orth_cols] = orth_noise_regressors - confounds_df = orth_confounds_df + orth_design_matrix_df.loc[:, orth_cols] = orth_noise_regressors + design_matrix_df = orth_design_matrix_df for col in noise_columns: desc_str = ( @@ -598,69 +764,26 @@ def _run_interface(self, runtime): ) col_metadata = {} - if col in confounds_metadata.keys(): - col_metadata = confounds_metadata.pop(col) + if col in design_matrix_metadata.keys(): + col_metadata = design_matrix_metadata.pop(col) if "Description" in col_metadata.keys(): desc_str = f"{col_metadata['Description']} {desc_str}" col_metadata["Description"] = desc_str - confounds_metadata[f"{col}_orth"] = col_metadata + design_matrix_metadata[f"{col}_orth"] = col_metadata - self._results["confounds_metadata"] = confounds_metadata + self._results["design_matrix_metadata"] = design_matrix_metadata # get the output - self._results["motion_file"] = fname_presuffix( - self.inputs.fmriprep_confounds_file, - suffix="_motion", - newpath=runtime.cwd, - use_ext=True, - ) - motion_df.to_csv(self._results["motion_file"], sep="\t", index=False) - - self._results["confounds_file"] = None - if confounds_df is not None: - self._results["confounds_file"] = fname_presuffix( - self.inputs.fmriprep_confounds_file, + self._results["design_matrix"] = None + if design_matrix_df is not None: + self._results["design_matrix"] = fname_presuffix( + self.inputs.full_confounds, suffix="_confounds", newpath=runtime.cwd, use_ext=True, ) - confounds_df.to_csv(self._results["confounds_file"], sep="\t", index=False) - - # Generate temporal mask with all timepoints have FD over threshold set to 1. - outlier_mask = np.zeros(len(fd_timeseries), dtype=int) - if self.inputs.fd_thresh > 0: - outlier_mask[fd_timeseries > self.inputs.fd_thresh] = 1 - else: - LOGGER.info(f"FD threshold set to {self.inputs.fd_thresh}. 
Censoring is disabled.") - - self._results["temporal_mask"] = fname_presuffix( - "desc-fd_outliers.tsv", - newpath=runtime.cwd, - use_ext=True, - ) - - outliers_df = pd.DataFrame(data=outlier_mask, columns=["framewise_displacement"]) - outliers_df.to_csv( - self._results["temporal_mask"], - index=False, - header=True, - sep="\t", - ) - - outliers_metadata = { - "framewise_displacement": { - "Description": "Outlier time series based on framewise displacement.", - "Levels": { - "0": "Non-outlier volume", - "1": "Outlier volume", - }, - "Threshold": self.inputs.fd_thresh, - } - } - self._results["temporal_mask_metadata"] = outliers_metadata - - return runtime + design_matrix_df.to_csv(self._results["design_matrix"], sep="\t", index=False) class _ReduceCiftiInputSpec(BaseInterfaceInputSpec): diff --git a/xcp_d/interfaces/concatenation.py b/xcp_d/interfaces/concatenation.py index 1f9d3ab1b..b1e21d8d8 100644 --- a/xcp_d/interfaces/concatenation.py +++ b/xcp_d/interfaces/concatenation.py @@ -70,7 +70,7 @@ class _FilterOutFailedRunsInputSpec(BaseInterfaceInputSpec): mandatory=True, desc="Preprocessed BOLD files, after dummy volume removal.", ) - fmriprep_confounds_file = traits.List( + full_confounds = traits.List( traits.Either( File(exists=True), Undefined, @@ -78,7 +78,7 @@ class _FilterOutFailedRunsInputSpec(BaseInterfaceInputSpec): mandatory=True, desc="TSV files with fMRIPrep confounds for individual BOLD runs.", ) - filtered_motion = traits.List( + modified_full_confounds = traits.List( traits.Either( File(exists=True), Undefined, @@ -141,11 +141,11 @@ class _FilterOutFailedRunsOutputSpec(TraitedSpec): File(exists=True), desc="Preprocessed BOLD files, after dummy volume removal.", ) - fmriprep_confounds_file = traits.List( + full_confounds = traits.List( File(exists=True), desc="fMRIPrep confounds files, after dummy volume removal.", ) - filtered_motion = traits.List( + modified_full_confounds = traits.List( File(exists=True), desc="TSV files with filtered motion parameters, used for FD calculation.", ) @@ -210,8 +210,8 @@ def _run_interface(self, runtime): censored_denoised_bold = self.inputs.censored_denoised_bold inputs_to_filter = { "preprocessed_bold": self.inputs.preprocessed_bold, - "fmriprep_confounds_file": self.inputs.fmriprep_confounds_file, - "filtered_motion": self.inputs.filtered_motion, + "full_confounds": self.inputs.full_confounds, + "modified_full_confounds": self.inputs.modified_full_confounds, "temporal_mask": self.inputs.temporal_mask, "denoised_interpolated_bold": self.inputs.denoised_interpolated_bold, "smoothed_denoised_bold": self.inputs.smoothed_denoised_bold, @@ -254,12 +254,12 @@ class _ConcatenateInputsInputSpec(BaseInterfaceInputSpec): mandatory=True, desc="Preprocessed BOLD files, after dummy volume removal.", ) - fmriprep_confounds_file = traits.List( + full_confounds = traits.List( File(exists=True), mandatory=True, desc="TSV files with fMRIPrep confounds for individual BOLD runs.", ) - filtered_motion = traits.List( + modified_full_confounds = traits.List( File(exists=True), mandatory=True, desc="TSV files with filtered motion parameters, used for FD calculation.", @@ -311,11 +311,11 @@ class _ConcatenateInputsOutputSpec(TraitedSpec): exists=True, desc="Concatenated preprocessed BOLD file.", ) - fmriprep_confounds_file = File( + full_confounds = File( exists=True, desc="Concatenated TSV file with fMRIPrep confounds.", ) - filtered_motion = File( + modified_full_confounds = File( exists=True, desc="Concatenated TSV file with filtered motion parameters, used 
for FD calculation.", ) @@ -364,14 +364,14 @@ def _run_interface(self, runtime): "denoised_interpolated_bold": self.inputs.denoised_interpolated_bold, "smoothed_denoised_bold": self.inputs.smoothed_denoised_bold, "timeseries_ciftis": self.inputs.timeseries_ciftis, - "fmriprep_confounds_file": self.inputs.fmriprep_confounds_file, - "filtered_motion": self.inputs.filtered_motion, + "full_confounds": self.inputs.full_confounds, + "modified_full_confounds": self.inputs.modified_full_confounds, "temporal_mask": self.inputs.temporal_mask, "timeseries": self.inputs.timeseries, } run_index, n_volumes = [], 0 - for run_motion in self.inputs.filtered_motion[:-1]: + for run_motion in self.inputs.modified_full_confounds[:-1]: n_volumes = n_volumes + pd.read_table(run_motion).shape[0] run_index.append(n_volumes) diff --git a/xcp_d/interfaces/connectivity.py b/xcp_d/interfaces/connectivity.py index bb4bf2527..df9b3dc41 100644 --- a/xcp_d/interfaces/connectivity.py +++ b/xcp_d/interfaces/connectivity.py @@ -237,7 +237,7 @@ def correlate_timeseries(timeseries, temporal_mask): correlations_exact = {} if isdefined(temporal_mask): censoring_df = pd.read_table(temporal_mask) - censored_censoring_df = censoring_df.loc[censoring_df["framewise_displacement"] == 0] + censored_censoring_df = censoring_df.loc[censoring_df["interpolation"] == 0] censored_censoring_df.reset_index(drop=True, inplace=True) exact_columns = [c for c in censoring_df.columns if c.startswith("exact_")] for exact_column in exact_columns: diff --git a/xcp_d/interfaces/nilearn.py b/xcp_d/interfaces/nilearn.py index d36542b75..ec00b739f 100644 --- a/xcp_d/interfaces/nilearn.py +++ b/xcp_d/interfaces/nilearn.py @@ -245,7 +245,7 @@ class _DenoiseImageInputSpec(BaseInterfaceInputSpec): "but without any additional censoring." ), ) - confounds_file = traits.Either( + design_matrix = traits.Either( File(exists=True), None, mandatory=True, @@ -305,11 +305,11 @@ def _run_interface(self, runtime): ) # Invert temporal mask, so low-motion volumes are True and high-motion volumes are False. - sample_mask = ~censoring_df["framewise_displacement"].to_numpy().astype(bool) + sample_mask = ~censoring_df["denoising"].to_numpy().astype(bool) confounds_df = None - if self.inputs.confounds_file: - confounds_df = pd.read_table(self.inputs.confounds_file) + if self.inputs.design_matrix: + confounds_df = pd.read_table(self.inputs.design_matrix) if confounds_df.shape[0] != n_volumes: raise ValueError( f"Confounds file has {confounds_df.shape[0]} rows, " @@ -381,11 +381,11 @@ def _run_interface(self, runtime): ) # Invert temporal mask, so low-motion volumes are True and high-motion volumes are False. 
- sample_mask = ~censoring_df["framewise_displacement"].to_numpy().astype(bool) + sample_mask = ~censoring_df["denoising"].to_numpy().astype(bool) confounds_df = None - if self.inputs.confounds_file: - confounds_df = pd.read_table(self.inputs.confounds_file) + if self.inputs.design_matrix: + confounds_df = pd.read_table(self.inputs.design_matrix) if confounds_df.shape[0] != n_volumes: raise ValueError( f"Confounds file has {confounds_df.shape[0]} rows, " diff --git a/xcp_d/interfaces/plotting.py b/xcp_d/interfaces/plotting.py index 860402e10..ab8e7ad9f 100644 --- a/xcp_d/interfaces/plotting.py +++ b/xcp_d/interfaces/plotting.py @@ -29,7 +29,7 @@ from nipype.interfaces.fsl.base import FSLCommand, FSLCommandInputSpec from templateflow.api import get as get_template -from xcp_d.utils.confounds import load_motion +from xcp_d.utils.confounds import filter_motion from xcp_d.utils.filemanip import fname_presuffix from xcp_d.utils.modified_data import compute_fd from xcp_d.utils.plotting import FMRIPlot, plot_fmri_es, surf_data_from_cifti @@ -40,8 +40,8 @@ class _CensoringPlotInputSpec(BaseInterfaceInputSpec): - fmriprep_confounds_file = File(exists=True, mandatory=True, desc="fMRIPrep confounds file.") - filtered_motion = File(exists=True, mandatory=True, desc="Filtered motion file.") + full_confounds = File(exists=True, mandatory=True, desc="fMRIPrep confounds file.") + modified_full_confounds = File(exists=True, mandatory=True, desc="Filtered motion file.") temporal_mask = File( exists=True, mandatory=True, @@ -51,7 +51,36 @@ class _CensoringPlotInputSpec(BaseInterfaceInputSpec): TR = traits.Float(mandatory=True, desc="Repetition Time") head_radius = traits.Float(mandatory=True, desc="Head radius for FD calculation") motion_filter_type = traits.Either(None, traits.Str, mandatory=True) - fd_thresh = traits.Float(mandatory=True, desc="Framewise displacement threshold.") + fd_thresh = traits.List( + traits.Float, + minlen=2, + maxlen=2, + desc="Framewise displacement threshold. All values above this will be dropped.", + ) + dvars_thresh = traits.List( + traits.Float, + minlen=2, + maxlen=2, + desc="DVARS threshold. 
All values above this will be dropped.", + ) + censor_before = traits.List( + traits.Int, + minlen=2, + maxlen=2, + desc="Number of volumes to censor before each FD or DVARS outlier volume.", + ) + censor_after = traits.List( + traits.Int, + minlen=2, + maxlen=2, + desc="Number of volumes to censor after each FD or DVARS outlier volume.", + ) + censor_between = traits.List( + traits.Int, + minlen=2, + maxlen=2, + desc="Number of volumes to censor between each FD or DVARS outlier volume.", + ) class _CensoringPlotOutputSpec(TraitedSpec): @@ -71,8 +100,8 @@ class CensoringPlot(SimpleInterface): def _run_interface(self, runtime): # Load confound matrix and load motion with motion filtering - confounds_df = pd.read_table(self.inputs.fmriprep_confounds_file) - preproc_motion_df = load_motion( + confounds_df = pd.read_table(self.inputs.full_confounds) + preproc_motion_df = filter_motion( confounds_df.copy(), TR=self.inputs.TR, motion_filter_type=None, @@ -99,7 +128,18 @@ def _run_interface(self, runtime): label="Raw Framewise Displacement", color=palette[0], ) - ax.axhline(self.inputs.fd_thresh, label="Outlier Threshold", color="salmon", alpha=0.5) + ax.axhline( + self.inputs.fd_thresh[0], + label="FD Denoising Outlier Threshold", + color=palette[3], + alpha=0.5, + ) + ax.axhline( + self.inputs.fd_thresh[1], + label="FD Post-Denoising Outlier Threshold", + color=palette[4], + alpha=0.5, + ) dummy_scans = self.inputs.dummy_scans # This check is necessary, because init_prepare_confounds_wf connects dummy_scans from the @@ -121,7 +161,7 @@ def _run_interface(self, runtime): # Compute filtered framewise displacement to plot censoring if self.inputs.motion_filter_type: - filtered_fd_timeseries = pd.read_table(self.inputs.filtered_motion)[ + filtered_fd_timeseries = pd.read_table(self.inputs.modified_full_confounds)[ "framewise_displacement" ] @@ -142,7 +182,8 @@ def _run_interface(self, runtime): ( preproc_fd_timeseries, filtered_fd_timeseries, - [self.inputs.fd_thresh], + [self.inputs.fd_thresh[0]], + [self.inputs.dvars_thresh[0]], ) ) ) @@ -168,18 +209,20 @@ def _run_interface(self, runtime): ymin=vline_ymin, ymax=vline_ymax, label=label, - color=palette[4 + i_col], + color=palette[5 + i_col], alpha=0.8, ) vline_ymax = vline_ymin # Plot motion-censored volumes as vertical lines - tmask_arr = censoring_df["framewise_displacement"].values - assert preproc_fd_timeseries.size == tmask_arr.size + tmask_arr = censoring_df["denoising"].values + if preproc_fd_timeseries.size != tmask_arr.size: + raise ValueError(f"{preproc_fd_timeseries.size} != {tmask_arr.size}") + tmask_idx = np.where(tmask_arr)[0] for i_idx, idx in enumerate(tmask_idx): - label = "Motion-Censored Volumes" if i_idx == 0 else "" + label = "Denoising Censored Volumes" if i_idx == 0 else "" ax.axvline( idx * self.inputs.TR, label=label, @@ -187,6 +230,17 @@ def _run_interface(self, runtime): alpha=0.5, ) + tmask_arr = censoring_df["interpolation"].values - censoring_df["denoising"].values + tmask_idx = np.where(tmask_arr)[0] + for i_idx, idx in enumerate(tmask_idx): + label = "Post-Denoising Censored Volumes" if i_idx == 0 else "" + ax.axvline( + idx * self.inputs.TR, + label=label, + color=palette[4], + alpha=0.5, + ) + ax.set_xlabel("Time (seconds)", fontsize=10) ax.set_ylabel("Movement (millimeters)", fontsize=10) ax.legend(fontsize=10) @@ -226,7 +280,7 @@ class _QCPlotsInputSpec(BaseInterfaceInputSpec): Undefined, desc="Temporal mask", ) - fmriprep_confounds_file = File( + full_confounds = File( exists=True, mandatory=True, 
desc="fMRIPrep confounds file, after dummy scans removal", @@ -288,8 +342,8 @@ class QCPlots(SimpleInterface): def _run_interface(self, runtime): # Load confound matrix and load motion without motion filtering - confounds_df = pd.read_table(self.inputs.fmriprep_confounds_file) - preproc_motion_df = load_motion( + confounds_df = pd.read_table(self.inputs.full_confounds) + preproc_motion_df = filter_motion( confounds_df.copy(), TR=self.inputs.TR, motion_filter_type=None, @@ -306,7 +360,7 @@ def _run_interface(self, runtime): dummy_scans = self.inputs.dummy_scans if isdefined(self.inputs.temporal_mask): censoring_df = pd.read_table(self.inputs.temporal_mask) - tmask_arr = censoring_df["framewise_displacement"].values + tmask_arr = censoring_df["interpolation"].values else: tmask_arr = np.zeros(preproc_fd_timeseries.size, dtype=int) @@ -571,7 +625,7 @@ class _QCPlotsESInputSpec(BaseInterfaceInputSpec): mandatory=True, desc="Data after filtering, interpolation, etc. This is not plotted.", ) - filtered_motion = File( + modified_full_confounds = File( exists=True, mandatory=True, desc="TSV file with filtered motion parameters.", @@ -655,7 +709,7 @@ def _run_interface(self, runtime): preprocessed_bold=self.inputs.preprocessed_bold, denoised_interpolated_bold=self.inputs.denoised_interpolated_bold, TR=self.inputs.TR, - filtered_motion=self.inputs.filtered_motion, + modified_full_confounds=self.inputs.modified_full_confounds, temporal_mask=self.inputs.temporal_mask, preprocessed_figure=preprocessed_figure, denoised_figure=denoised_figure, diff --git a/xcp_d/interfaces/restingstate.py b/xcp_d/interfaces/restingstate.py index d38aa9a00..5a170b743 100644 --- a/xcp_d/interfaces/restingstate.py +++ b/xcp_d/interfaces/restingstate.py @@ -142,7 +142,7 @@ def _run_interface(self, runtime): if isinstance(temporal_mask, str) and os.path.isfile(temporal_mask): censoring_df = pd.read_table(temporal_mask) # Invert the temporal mask to make retained volumes 1s and dropped volumes 0s. 
- sample_mask = ~censoring_df["framewise_displacement"].values.astype(bool) + sample_mask = ~censoring_df["interpolation"].values.astype(bool) # compute the ALFF alff_mat = compute_alff( diff --git a/xcp_d/tests/data/test_ds001419_cifti_outputs.txt b/xcp_d/tests/data/test_ds001419_cifti_outputs.txt index 676d7281f..d803a25d7 100644 --- a/xcp_d/tests/data/test_ds001419_cifti_outputs.txt +++ b/xcp_d/tests/data/test_ds001419_cifti_outputs.txt @@ -30,13 +30,13 @@ sub-01/anat/sub-01_space-MNI152NLin2009cAsym_desc-preproc_T1w.nii.gz sub-01/anat/sub-01_space-MNI152NLin2009cAsym_dseg.nii.gz sub-01/func sub-01/func/sub-01_task-imagery_desc-dcan_qc.hdf5 -sub-01/func/sub-01_task-imagery_desc-filtered_motion.json -sub-01/func/sub-01_task-imagery_desc-filtered_motion.tsv +sub-01/func/sub-01_task-imagery_desc-confounds_timeseries.json +sub-01/func/sub-01_task-imagery_desc-confounds_timeseries.tsv sub-01/func/sub-01_task-imagery_outliers.json sub-01/func/sub-01_task-imagery_outliers.tsv sub-01/func/sub-01_task-imagery_run-01_desc-dcan_qc.hdf5 -sub-01/func/sub-01_task-imagery_run-01_desc-filtered_motion.json -sub-01/func/sub-01_task-imagery_run-01_desc-filtered_motion.tsv +sub-01/func/sub-01_task-imagery_run-01_desc-confounds_timeseries.json +sub-01/func/sub-01_task-imagery_run-01_desc-confounds_timeseries.tsv sub-01/func/sub-01_task-imagery_run-01_design.json sub-01/func/sub-01_task-imagery_run-01_design.tsv sub-01/func/sub-01_task-imagery_run-01_outliers.json @@ -123,8 +123,8 @@ sub-01/func/sub-01_task-imagery_run-01_space-fsLR_seg-4S456Parcels_stat-pearsonc sub-01/func/sub-01_task-imagery_run-01_space-fsLR_seg-4S456Parcels_stat-reho_bold.json sub-01/func/sub-01_task-imagery_run-01_space-fsLR_seg-4S456Parcels_stat-reho_bold.tsv sub-01/func/sub-01_task-imagery_run-02_desc-dcan_qc.hdf5 -sub-01/func/sub-01_task-imagery_run-02_desc-filtered_motion.json -sub-01/func/sub-01_task-imagery_run-02_desc-filtered_motion.tsv +sub-01/func/sub-01_task-imagery_run-02_desc-confounds_timeseries.json +sub-01/func/sub-01_task-imagery_run-02_desc-confounds_timeseries.tsv sub-01/func/sub-01_task-imagery_run-02_design.json sub-01/func/sub-01_task-imagery_run-02_design.tsv sub-01/func/sub-01_task-imagery_run-02_outliers.json diff --git a/xcp_d/tests/data/test_ds001419_nifti_outputs.txt b/xcp_d/tests/data/test_ds001419_nifti_outputs.txt index ed392b301..d13063b9e 100644 --- a/xcp_d/tests/data/test_ds001419_nifti_outputs.txt +++ b/xcp_d/tests/data/test_ds001419_nifti_outputs.txt @@ -11,12 +11,12 @@ sub-01/anat sub-01/anat/sub-01_space-MNI152NLin2009cAsym_desc-preproc_T1w.nii.gz sub-01/anat/sub-01_space-MNI152NLin2009cAsym_dseg.nii.gz sub-01/func -sub-01/func/sub-01_task-imagery_desc-filtered_motion.json -sub-01/func/sub-01_task-imagery_desc-filtered_motion.tsv +sub-01/func/sub-01_task-imagery_desc-confounds_timeseries.json +sub-01/func/sub-01_task-imagery_desc-confounds_timeseries.tsv sub-01/func/sub-01_task-imagery_outliers.json sub-01/func/sub-01_task-imagery_outliers.tsv -sub-01/func/sub-01_task-imagery_run-01_desc-filtered_motion.json -sub-01/func/sub-01_task-imagery_run-01_desc-filtered_motion.tsv +sub-01/func/sub-01_task-imagery_run-01_desc-confounds_timeseries.json +sub-01/func/sub-01_task-imagery_run-01_desc-confounds_timeseries.tsv sub-01/func/sub-01_task-imagery_run-01_desc-preproc_design.json sub-01/func/sub-01_task-imagery_run-01_desc-preproc_design.tsv sub-01/func/sub-01_task-imagery_run-01_outliers.json @@ -32,8 +32,8 @@ sub-01/func/sub-01_task-imagery_run-01_space-MNI152NLin2009cAsym_res-2_stat-alff 
sub-01/func/sub-01_task-imagery_run-01_space-MNI152NLin2009cAsym_res-2_stat-alff_desc-smooth_boldmap.nii.gz sub-01/func/sub-01_task-imagery_run-01_space-MNI152NLin2009cAsym_res-2_stat-reho_boldmap.json sub-01/func/sub-01_task-imagery_run-01_space-MNI152NLin2009cAsym_res-2_stat-reho_boldmap.nii.gz -sub-01/func/sub-01_task-imagery_run-02_desc-filtered_motion.json -sub-01/func/sub-01_task-imagery_run-02_desc-filtered_motion.tsv +sub-01/func/sub-01_task-imagery_run-02_desc-confounds_timeseries.json +sub-01/func/sub-01_task-imagery_run-02_desc-confounds_timeseries.tsv sub-01/func/sub-01_task-imagery_run-02_desc-preproc_design.json sub-01/func/sub-01_task-imagery_run-02_desc-preproc_design.tsv sub-01/func/sub-01_task-imagery_run-02_outliers.json @@ -53,8 +53,8 @@ sub-01/func/sub-01_task-imagery_space-MNI152NLin2009cAsym_res-2_desc-denoisedSmo sub-01/func/sub-01_task-imagery_space-MNI152NLin2009cAsym_res-2_desc-denoisedSmoothed_bold.nii.gz sub-01/func/sub-01_task-imagery_space-MNI152NLin2009cAsym_res-2_desc-denoised_bold.json sub-01/func/sub-01_task-imagery_space-MNI152NLin2009cAsym_res-2_desc-denoised_bold.nii.gz -sub-01/func/sub-01_task-rest_desc-filtered_motion.json -sub-01/func/sub-01_task-rest_desc-filtered_motion.tsv +sub-01/func/sub-01_task-rest_desc-confounds_timeseries.json +sub-01/func/sub-01_task-rest_desc-confounds_timeseries.tsv sub-01/func/sub-01_task-rest_desc-preproc_design.json sub-01/func/sub-01_task-rest_desc-preproc_design.tsv sub-01/func/sub-01_task-rest_outliers.json diff --git a/xcp_d/tests/data/test_fmriprep_without_freesurfer_outputs.txt b/xcp_d/tests/data/test_fmriprep_without_freesurfer_outputs.txt index 344a15219..b9b432c61 100644 --- a/xcp_d/tests/data/test_fmriprep_without_freesurfer_outputs.txt +++ b/xcp_d/tests/data/test_fmriprep_without_freesurfer_outputs.txt @@ -72,8 +72,8 @@ sub-01/func sub-01/func/sub-01_task-mixedgamblestask_run-1_desc-dcan_qc.hdf5 sub-01/func/sub-01_task-mixedgamblestask_run-1_desc-preproc_design.json sub-01/func/sub-01_task-mixedgamblestask_run-1_desc-preproc_design.tsv -sub-01/func/sub-01_task-mixedgamblestask_run-1_motion.json -sub-01/func/sub-01_task-mixedgamblestask_run-1_motion.tsv +sub-01/func/sub-01_task-mixedgamblestask_run-1_desc-confounds_timeseries.json +sub-01/func/sub-01_task-mixedgamblestask_run-1_desc-confounds_timeseries.tsv sub-01/func/sub-01_task-mixedgamblestask_run-1_outliers.json sub-01/func/sub-01_task-mixedgamblestask_run-1_outliers.tsv sub-01/func/sub-01_task-mixedgamblestask_run-1_space-MNI152NLin2009cAsym_desc-denoisedSmoothed_bold.json @@ -200,8 +200,8 @@ sub-01/func/sub-01_task-mixedgamblestask_run-1_space-MNI152NLin2009cAsym_stat-re sub-01/func/sub-01_task-mixedgamblestask_run-2_desc-dcan_qc.hdf5 sub-01/func/sub-01_task-mixedgamblestask_run-2_desc-preproc_design.json sub-01/func/sub-01_task-mixedgamblestask_run-2_desc-preproc_design.tsv -sub-01/func/sub-01_task-mixedgamblestask_run-2_motion.json -sub-01/func/sub-01_task-mixedgamblestask_run-2_motion.tsv +sub-01/func/sub-01_task-mixedgamblestask_run-2_desc-confounds_timeseries.json +sub-01/func/sub-01_task-mixedgamblestask_run-2_desc-confounds_timeseries.tsv sub-01/func/sub-01_task-mixedgamblestask_run-2_outliers.json sub-01/func/sub-01_task-mixedgamblestask_run-2_outliers.tsv sub-01/func/sub-01_task-mixedgamblestask_run-2_space-MNI152NLin2009cAsym_desc-denoisedSmoothed_bold.json diff --git a/xcp_d/tests/data/test_nibabies_outputs.txt b/xcp_d/tests/data/test_nibabies_outputs.txt index 0e6714205..bee93bad2 100644 --- a/xcp_d/tests/data/test_nibabies_outputs.txt +++ 
b/xcp_d/tests/data/test_nibabies_outputs.txt @@ -73,8 +73,8 @@ sub-01/ses-1mo/func sub-01/ses-1mo/func/sub-01_ses-1mo_task-rest_acq-PA_run-001_desc-dcan_qc.hdf5 sub-01/ses-1mo/func/sub-01_ses-1mo_task-rest_acq-PA_run-001_desc-preproc_design.json sub-01/ses-1mo/func/sub-01_ses-1mo_task-rest_acq-PA_run-001_desc-preproc_design.tsv -sub-01/ses-1mo/func/sub-01_ses-1mo_task-rest_acq-PA_run-001_motion.json -sub-01/ses-1mo/func/sub-01_ses-1mo_task-rest_acq-PA_run-001_motion.tsv +sub-01/ses-1mo/func/sub-01_ses-1mo_task-rest_acq-PA_run-001_desc-confounds_timeseries.json +sub-01/ses-1mo/func/sub-01_ses-1mo_task-rest_acq-PA_run-001_desc-confounds_timeseries.tsv sub-01/ses-1mo/func/sub-01_ses-1mo_task-rest_acq-PA_run-001_space-MNIInfant_cohort-1_desc-denoised_bold.json sub-01/ses-1mo/func/sub-01_ses-1mo_task-rest_acq-PA_run-001_space-MNIInfant_cohort-1_desc-denoised_bold.nii.gz sub-01/ses-1mo/func/sub-01_ses-1mo_task-rest_acq-PA_run-001_space-MNIInfant_cohort-1_desc-linc_qc.tsv diff --git a/xcp_d/tests/data/test_pnc_cifti_outputs.txt b/xcp_d/tests/data/test_pnc_cifti_outputs.txt index d1013b2f5..0f5e422ca 100644 --- a/xcp_d/tests/data/test_pnc_cifti_outputs.txt +++ b/xcp_d/tests/data/test_pnc_cifti_outputs.txt @@ -36,8 +36,8 @@ sub-1648798153/ses-PNC1/anat/sub-1648798153_ses-PNC1_acq-refaced_space-fsLR_den- sub-1648798153/ses-PNC1/anat/sub-1648798153_ses-PNC1_acq-refaced_space-fsLR_den-91k_thickness.dscalar.nii sub-1648798153/ses-PNC1/func sub-1648798153/ses-PNC1/func/sub-1648798153_ses-PNC1_task-rest_acq-singleband_desc-dcan_qc.hdf5 -sub-1648798153/ses-PNC1/func/sub-1648798153_ses-PNC1_task-rest_acq-singleband_desc-filtered_motion.json -sub-1648798153/ses-PNC1/func/sub-1648798153_ses-PNC1_task-rest_acq-singleband_desc-filtered_motion.tsv +sub-1648798153/ses-PNC1/func/sub-1648798153_ses-PNC1_task-rest_acq-singleband_desc-confounds_timeseries.json +sub-1648798153/ses-PNC1/func/sub-1648798153_ses-PNC1_task-rest_acq-singleband_desc-confounds_timeseries.tsv sub-1648798153/ses-PNC1/func/sub-1648798153_ses-PNC1_task-rest_acq-singleband_design.json sub-1648798153/ses-PNC1/func/sub-1648798153_ses-PNC1_task-rest_acq-singleband_design.tsv sub-1648798153/ses-PNC1/func/sub-1648798153_ses-PNC1_task-rest_acq-singleband_outliers.json diff --git a/xcp_d/tests/data/test_pnc_cifti_t2wonly_outputs.txt b/xcp_d/tests/data/test_pnc_cifti_t2wonly_outputs.txt index 3a571b567..d02637b69 100644 --- a/xcp_d/tests/data/test_pnc_cifti_t2wonly_outputs.txt +++ b/xcp_d/tests/data/test_pnc_cifti_t2wonly_outputs.txt @@ -42,8 +42,8 @@ sub-1648798153/ses-PNC1/anat/sub-1648798153_ses-PNC1_acq-refaced_space-fsLR_seg- sub-1648798153/ses-PNC1/anat/sub-1648798153_ses-PNC1_acq-refaced_space-fsLR_seg-Glasser_stat-mean_desc-thickness_morph.tsv sub-1648798153/ses-PNC1/func sub-1648798153/ses-PNC1/func/sub-1648798153_ses-PNC1_task-idemo_desc-dcan_qc.hdf5 -sub-1648798153/ses-PNC1/func/sub-1648798153_ses-PNC1_task-idemo_desc-filtered_motion.json -sub-1648798153/ses-PNC1/func/sub-1648798153_ses-PNC1_task-idemo_desc-filtered_motion.tsv +sub-1648798153/ses-PNC1/func/sub-1648798153_ses-PNC1_task-idemo_desc-confounds_timeseries.json +sub-1648798153/ses-PNC1/func/sub-1648798153_ses-PNC1_task-idemo_desc-confounds_timeseries.tsv sub-1648798153/ses-PNC1/func/sub-1648798153_ses-PNC1_task-idemo_outliers.json sub-1648798153/ses-PNC1/func/sub-1648798153_ses-PNC1_task-idemo_outliers.tsv sub-1648798153/ses-PNC1/func/sub-1648798153_ses-PNC1_task-idemo_space-fsLR_den-91k_desc-denoisedSmoothed_bold.dtseries.nii diff --git a/xcp_d/tests/data/test_ukbiobank_outputs.txt 
b/xcp_d/tests/data/test_ukbiobank_outputs.txt index 7fc655747..10741a503 100644 --- a/xcp_d/tests/data/test_ukbiobank_outputs.txt +++ b/xcp_d/tests/data/test_ukbiobank_outputs.txt @@ -70,8 +70,8 @@ sub-0000001/ses-01/anat sub-0000001/ses-01/anat/sub-0000001_ses-01_space-MNI152NLin6Asym_desc-aparcaseg_dseg.nii.gz sub-0000001/ses-01/anat/sub-0000001_ses-01_space-MNI152NLin6Asym_desc-preproc_T1w.nii.gz sub-0000001/ses-01/func -sub-0000001/ses-01/func/sub-0000001_ses-01_task-rest_desc-filtered_motion.json -sub-0000001/ses-01/func/sub-0000001_ses-01_task-rest_desc-filtered_motion.tsv +sub-0000001/ses-01/func/sub-0000001_ses-01_task-rest_desc-confounds_timeseries.json +sub-0000001/ses-01/func/sub-0000001_ses-01_task-rest_desc-confounds_timeseries.tsv sub-0000001/ses-01/func/sub-0000001_ses-01_task-rest_desc-preproc_design.json sub-0000001/ses-01/func/sub-0000001_ses-01_task-rest_desc-preproc_design.tsv sub-0000001/ses-01/func/sub-0000001_ses-01_task-rest_outliers.json diff --git a/xcp_d/tests/test_TR.py b/xcp_d/tests/test_TR.py index 858b9651b..e882d97a8 100644 --- a/xcp_d/tests/test_TR.py +++ b/xcp_d/tests/test_TR.py @@ -17,7 +17,7 @@ def test_removedummyvolumes_nifti(ds001419_data, tmp_path_factory): """Test RemoveDummyVolumes() for NIFTI input data.""" # Define inputs - temp_dir = tmp_path_factory.mktemp("test_RemoveDummyVolumes_nifti") + temp_dir = tmp_path_factory.mktemp("test_removedummyvolumes_nifti") boldfile = ds001419_data["nifti_file"] confounds_file = ds001419_data["confounds_file"] @@ -29,17 +29,17 @@ def test_removedummyvolumes_nifti(ds001419_data, tmp_path_factory): # Test a nifti file with 0 volumes to remove remove_nothing = RemoveDummyVolumes( bold_file=boldfile, - fmriprep_confounds_file=confounds_file, - confounds_file=confounds_file, - motion_file=confounds_file, + full_confounds=confounds_file, + modified_full_confounds=confounds_file, + design_matrix=confounds_file, temporal_mask=confounds_file, dummy_scans=0, ) results = remove_nothing.run(cwd=temp_dir) - undropped_confounds = pd.read_table(results.outputs.fmriprep_confounds_file_dropped_TR) + undropped_confounds = pd.read_table(results.outputs.full_confounds_dropped_TR) # Were the files created? assert op.exists(results.outputs.bold_file_dropped_TR) - assert op.exists(results.outputs.fmriprep_confounds_file_dropped_TR) + assert op.exists(results.outputs.full_confounds_dropped_TR) # Have the confounds stayed the same shape? assert undropped_confounds.shape == original_confounds.shape # Has the nifti stayed the same shape? @@ -51,17 +51,17 @@ def test_removedummyvolumes_nifti(ds001419_data, tmp_path_factory): for n in range(10): remove_n_vols = RemoveDummyVolumes( bold_file=boldfile, - fmriprep_confounds_file=confounds_file, - confounds_file=confounds_file, - motion_file=confounds_file, + full_confounds=confounds_file, + modified_full_confounds=confounds_file, + design_matrix=confounds_file, temporal_mask=confounds_file, dummy_scans=n, ) results = remove_n_vols.run(cwd=temp_dir) - dropped_confounds = pd.read_table(results.outputs.fmriprep_confounds_file_dropped_TR) + dropped_confounds = pd.read_table(results.outputs.full_confounds_dropped_TR) # Were the files created? assert op.exists(results.outputs.bold_file_dropped_TR) - assert op.exists(results.outputs.fmriprep_confounds_file_dropped_TR) + assert op.exists(results.outputs.full_confounds_dropped_TR) # Have the confounds changed correctly? assert dropped_confounds.shape[0] == original_confounds.shape[0] - n # Has the nifti changed correctly? 
@@ -91,17 +91,17 @@ def test_removedummyvolumes_cifti(ds001419_data, tmp_path_factory): # Test a cifti file with 0 volumes to remove remove_nothing = RemoveDummyVolumes( bold_file=boldfile, - fmriprep_confounds_file=confounds_file, - confounds_file=confounds_file, - motion_file=confounds_file, + full_confounds=confounds_file, + modified_full_confounds=confounds_file, + design_matrix=confounds_file, temporal_mask=confounds_file, dummy_scans=0, ) results = remove_nothing.run(cwd=temp_dir) - undropped_confounds = pd.read_table(results.outputs.fmriprep_confounds_file_dropped_TR) + undropped_confounds = pd.read_table(results.outputs.full_confounds_dropped_TR) # Were the files created? assert op.exists(results.outputs.bold_file_dropped_TR) - assert op.exists(results.outputs.fmriprep_confounds_file_dropped_TR) + assert op.exists(results.outputs.full_confounds_dropped_TR) # Have the confounds stayed the same shape? assert undropped_confounds.shape == original_confounds.shape # Has the cifti stayed the same shape? @@ -113,18 +113,18 @@ def test_removedummyvolumes_cifti(ds001419_data, tmp_path_factory): for n in range(10): remove_n_vols = RemoveDummyVolumes( bold_file=boldfile, - fmriprep_confounds_file=confounds_file, - confounds_file=confounds_file, - motion_file=confounds_file, + full_confounds=confounds_file, + modified_full_confounds=confounds_file, + design_matrix=confounds_file, temporal_mask=confounds_file, dummy_scans=n, ) results = remove_n_vols.run(cwd=temp_dir) - dropped_confounds = pd.read_table(results.outputs.fmriprep_confounds_file_dropped_TR) + dropped_confounds = pd.read_table(results.outputs.full_confounds_dropped_TR) # Were the files created? assert op.exists(results.outputs.bold_file_dropped_TR) - assert op.exists(results.outputs.fmriprep_confounds_file_dropped_TR) + assert op.exists(results.outputs.full_confounds_dropped_TR) # Have the confounds changed correctly? assert dropped_confounds.shape[0] == original_confounds.shape[0] - n # Has the cifti changed correctly? 
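For orientation, a minimal usage sketch of the renamed RemoveDummyVolumes inputs exercised by the tests above. The import path is assumed from this patch's interface module, and the file paths are hypothetical (they follow the renamed output convention elsewhere in this patch); the output names match the assertions in the tests.

import os.path as op

from xcp_d.interfaces.censoring import RemoveDummyVolumes  # assumed import path

# Hypothetical input files; any run's preprocessing derivatives would do.
interface = RemoveDummyVolumes(
    bold_file="sub-01_task-rest_desc-preproc_bold.nii.gz",
    full_confounds="sub-01_task-rest_desc-confounds_timeseries.tsv",
    modified_full_confounds="sub-01_task-rest_desc-confounds_timeseries.tsv",
    design_matrix="sub-01_task-rest_desc-preproc_design.tsv",
    temporal_mask="sub-01_task-rest_outliers.tsv",
    dummy_scans=5,
)
results = interface.run()

# Outputs follow the new naming as well.
assert op.exists(results.outputs.bold_file_dropped_TR)
assert op.exists(results.outputs.full_confounds_dropped_TR)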
@@ -150,7 +150,7 @@ def test_removedummyvolumes_cifti(ds001419_data, tmp_path_factory): # # Run workflow # remvtr = RemoveDummyVolumes() # remvtr.inputs.bold_file = boldfile -# remvtr.inputs.fmriprep_confounds_file = confounds_tsv +# remvtr.inputs.full_confounds = confounds_tsv # remvtr.inputs.custom_confounds = custom_confounds_tsv # remvtr.inputs.dummy_scans = 5 # results = remvtr.run(cwd=temp_dir) @@ -177,7 +177,7 @@ def test_removedummyvolumes_cifti(ds001419_data, tmp_path_factory): # # Run workflow # remvtr = RemoveDummyVolumes() # remvtr.inputs.bold_file = boldfile -# remvtr.inputs.fmriprep_confounds_file = confounds_tsv +# remvtr.inputs.full_confounds = confounds_tsv # remvtr.inputs.custom_confounds = custom_confounds_tsv # remvtr.inputs.dummy_scans = 5 # results = remvtr.run(cwd=temp_dir) diff --git a/xcp_d/tests/test_cli_run.py b/xcp_d/tests/test_cli_run.py index 97e965136..7c0492c07 100644 --- a/xcp_d/tests/test_cli_run.py +++ b/xcp_d/tests/test_cli_run.py @@ -32,7 +32,11 @@ def base_opts(): "high_pass": 0.01, "low_pass": 0.1, "bandpass_filter": True, - "fd_thresh": 0.3, + "fd_thresh": [0.3, 0], + "dvars_thresh": [0, 0], + "censor_before": [0, 0], + "censor_after": [0, 0], + "censor_between": [0, 0], "min_time": 100, "motion_filter_type": "notch", "band_stop_min": 12, @@ -90,12 +94,12 @@ def test_validate_parameters_06(base_opts, base_parser, caplog): opts = deepcopy(base_opts) # Disable censoring - opts.fd_thresh = 0 + opts.fd_thresh = [0, 0] opts = parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) assert opts.min_time == 0 - assert "Framewise displacement-based scrubbing is disabled." in caplog.text + assert "Censoring is disabled." in caplog.text def test_validate_parameters_07(base_opts, base_parser, capsys): @@ -317,3 +321,21 @@ def test_validate_parameters_21(base_opts, base_parser, caplog): assert "cifti processing (--cifti) will be disabled automatically." in caplog.text assert "(--warp-surfaces-native2std) will be disabled automatically." 
in caplog.text + + +def test_validate_parameters_22(base_opts, base_parser, caplog): + """Test parser._validate_parameters.""" + opts = deepcopy(base_opts) + opts.fd_thresh = [0.3] + opts.dvars_thresh = [1] + opts.censor_before = [1] + opts.censor_after = [1] + opts.censor_between = [1] + + opts = parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) + + assert opts.fd_thresh == [0.3, 0.3] + assert opts.dvars_thresh == [1, 1] + assert opts.censor_before == [1, 0] + assert opts.censor_after == [1, 0] + assert opts.censor_between == [1, 0] diff --git a/xcp_d/tests/test_interfaces_censoring.py b/xcp_d/tests/test_interfaces_censoring.py index b81d9be2b..f1f6e2f1c 100644 --- a/xcp_d/tests/test_interfaces_censoring.py +++ b/xcp_d/tests/test_interfaces_censoring.py @@ -10,59 +10,102 @@ from xcp_d.interfaces import censoring -def test_generate_confounds(ds001419_data, tmp_path_factory): - """Check results.""" - tmpdir = tmp_path_factory.mktemp("test_generate_confounds") - in_file = ds001419_data["nifti_file"] +def test_modify_confounds(ds001419_data, tmp_path_factory): + """Test censoring.ModifyConfounds.""" + tmpdir = tmp_path_factory.mktemp("test_modify_confounds") confounds_file = ds001419_data["confounds_file"] confounds_json = ds001419_data["confounds_json"] df = pd.read_table(confounds_file) - with open(confounds_json, "r") as fo: - metadata = json.load(fo) - # Replace confounds tsv values with values that should be omitted - df.loc[1:3, "trans_x"] = [6, 8, 9] - df.loc[4:6, "trans_y"] = [7, 8, 9] - df.loc[7:9, "trans_z"] = [12, 8, 9] + # Run workflow + interface = censoring.ModifyConfounds( + head_radius=50, + TR=0.8, + full_confounds=confounds_file, + full_confounds_json=confounds_json, + motion_filter_type=None, + motion_filter_order=4, + band_stop_min=0, + band_stop_max=0, + ) + results = interface.run(cwd=tmpdir) + + assert os.path.isfile(results.outputs.modified_full_confounds) + assert isinstance(results.outputs.modified_full_confounds_metadata, dict) + + out_df = pd.read_table(results.outputs.modified_full_confounds) + assert out_df.shape == df.shape + + +def test_generate_temporal_mask(tmp_path_factory): + """Check results.""" + tmpdir = tmp_path_factory.mktemp("test_generate_temporal_mask") + + fd = np.zeros(20) + dvars = np.zeros(20) + + fd_idx = [(1, 0.4), (5, 0.2)] + for idx, val in fd_idx: + fd[idx] = val - # Modify JSON file - metadata["trans_x"] = {"test": "hello"} - confounds_json = os.path.join(tmpdir, "edited_confounds.json") - with open(confounds_json, "w") as fo: - json.dump(metadata, fo) + dvars_idx = [(11, 2.0), (15, 1.0)] + for idx, val in dvars_idx: + dvars[idx] = val - # Rename with same convention as initial confounds tsv - confounds_tsv = os.path.join(tmpdir, "edited_confounds.tsv") + df = pd.DataFrame({"framewise_displacement": fd, "std_dvars": dvars}) + confounds_tsv = os.path.join(tmpdir, "test.tsv") df.to_csv(confounds_tsv, sep="\t", index=False, header=True) + # Run workflow + interface = censoring.GenerateTemporalMask( + full_confounds=confounds_tsv, + fd_thresh=[0.3, 0.1], + dvars_thresh=[1.5, 0.5], + censor_before=[0, 0], + censor_after=[0, 0], + censor_between=[0, 0], + ) + results = interface.run(cwd=tmpdir) + + assert os.path.isfile(results.outputs.temporal_mask) + assert isinstance(results.outputs.temporal_mask_metadata, dict) + + out_df = pd.read_table(results.outputs.temporal_mask) + assert out_df.shape[0] == df.shape[0] + assert out_df.shape[1] == 6 # 6 types of outlier + + +def test_generate_design_matrix(ds001419_data, 
tmp_path_factory): + """Check results.""" + tmpdir = tmp_path_factory.mktemp("test_generate_design_matrix") + in_file = ds001419_data["nifti_file"] + confounds_file = ds001419_data["confounds_file"] + confounds_json = ds001419_data["confounds_json"] + + df = pd.read_table(confounds_file) + with open(confounds_json, "r") as fo: + metadata = json.load(fo) + custom_confounds_file = os.path.join(tmpdir, "custom_confounds.tsv") df2 = pd.DataFrame(columns=["signal__test"], data=np.random.random((df.shape[0], 1))) df2.to_csv(custom_confounds_file, sep="\t", index=False, header=True) # Run workflow - interface = censoring.GenerateConfounds( + interface = censoring.GenerateDesignMatrix( in_file=in_file, params="24P", - TR=0.8, - fd_thresh=0.3, - head_radius=50, - fmriprep_confounds_file=confounds_tsv, - fmriprep_confounds_json=confounds_json, + full_confounds=confounds_file, + full_confounds_metadata=metadata, custom_confounds_file=custom_confounds_file, - motion_filter_type=None, - motion_filter_order=4, - band_stop_min=0, - band_stop_max=0, ) results = interface.run(cwd=tmpdir) - assert os.path.isfile(results.outputs.filtered_confounds_file) - assert os.path.isfile(results.outputs.confounds_file) - assert os.path.isfile(results.outputs.motion_file) - assert os.path.isfile(results.outputs.temporal_mask) - out_confounds_file = results.outputs.confounds_file - out_df = pd.read_table(out_confounds_file) + assert os.path.isfile(results.outputs.design_matrix) + assert isinstance(results.outputs.design_matrix_metadata, dict) + + out_df = pd.read_table(results.outputs.design_matrix) + assert out_df.shape[0] == df.shape[0] assert out_df.shape[1] == 24 # 24(P) assert sum(out_df.columns.str.endswith("_orth")) == 24 # all 24(P) @@ -73,11 +116,21 @@ def test_random_censor(tmp_path_factory): n_volumes, n_outliers = 500, 100 exact_scans = [100, 200, 300, 400] - outliers_arr = np.zeros(n_volumes, dtype=int) + outliers_arr = np.zeros((n_volumes, 6), dtype=int) rng = np.random.default_rng(0) outlier_idx = rng.choice(np.arange(n_volumes, dtype=int), size=n_outliers, replace=False) - outliers_arr[outlier_idx] = 1 - temporal_mask_df = pd.DataFrame(data=outliers_arr, columns=["framewise_displacement"]) + outliers_arr[outlier_idx, :] = 1 + temporal_mask_df = pd.DataFrame( + data=outliers_arr, + columns=[ + "framewise_displacement", + "dvars", + "denoising", + "framewise_displacement_interpolation", + "dvars_interpolation", + "interpolation", + ], + ) original_temporal_mask = os.path.join(tmpdir, "orig_tmask.tsv") temporal_mask_df.to_csv(original_temporal_mask, index=False, sep="\t") @@ -130,7 +183,17 @@ def test_censor(ds001419_data, tmp_path_factory): cifti_file = ds001419_data["cifti_file"] in_img = nb.load(nifti_file) n_volumes = in_img.shape[3] - censoring_df = pd.DataFrame(columns=["framewise_displacement"], data=np.zeros(n_volumes)) + censoring_df = pd.DataFrame( + data=np.zeros((n_volumes, 6)), + columns=[ + "framewise_displacement", + "dvars", + "denoising", + "framewise_displacement_interpolation", + "dvars_interpolation", + "interpolation", + ], + ) temporal_mask = os.path.join(tmpdir, "temporal_mask.tsv") censoring_df.to_csv(temporal_mask, sep="\t", index=False) @@ -159,7 +222,7 @@ def test_censor(ds001419_data, tmp_path_factory): # Test with a NIfTI file, with some censored volumes n_censored_volumes = 10 n_retained_volumes = n_volumes - n_censored_volumes - censoring_df.loc[range(10), "framewise_displacement"] = 1 + censoring_df.loc[range(10), "interpolation"] = 1 censoring_df.to_csv(temporal_mask, 
sep="\t", index=False) interface = censoring.Censor( in_file=nifti_file, diff --git a/xcp_d/tests/test_interfaces_concatenation.py b/xcp_d/tests/test_interfaces_concatenation.py index 93be67792..942b132b5 100644 --- a/xcp_d/tests/test_interfaces_concatenation.py +++ b/xcp_d/tests/test_interfaces_concatenation.py @@ -40,8 +40,8 @@ def test_filteroutfailedruns(ds001419_data): n_runs = len(censored_denoised_bold) n_good_runs = 2 preprocessed_bold = [nifti_file] * n_runs - fmriprep_confounds_file = [tsv_file] * n_runs - filtered_motion = [tsv_file] * n_runs + full_confounds = [tsv_file] * n_runs + modified_full_confounds = [tsv_file] * n_runs temporal_mask = [nifti_file] * n_runs denoised_interpolated_bold = [nifti_file] * n_runs @@ -57,8 +57,8 @@ def test_filteroutfailedruns(ds001419_data): interface = concatenation.FilterOutFailedRuns( censored_denoised_bold=censored_denoised_bold, preprocessed_bold=preprocessed_bold, - fmriprep_confounds_file=fmriprep_confounds_file, - filtered_motion=filtered_motion, + full_confounds=full_confounds, + modified_full_confounds=modified_full_confounds, temporal_mask=temporal_mask, denoised_interpolated_bold=denoised_interpolated_bold, smoothed_denoised_bold=smoothed_denoised_bold, @@ -71,8 +71,8 @@ def test_filteroutfailedruns(ds001419_data): out = results.outputs assert len(out.censored_denoised_bold) == n_good_runs assert len(out.preprocessed_bold) == n_good_runs - assert len(out.fmriprep_confounds_file) == n_good_runs - assert len(out.filtered_motion) == n_good_runs + assert len(out.full_confounds) == n_good_runs + assert len(out.modified_full_confounds) == n_good_runs assert len(out.temporal_mask) == n_good_runs assert len(out.denoised_interpolated_bold) == n_good_runs assert len(out.smoothed_denoised_bold) == n_good_runs @@ -94,8 +94,8 @@ def test_concatenateinputs(ds001419_data, tmp_path_factory): n_atlases = 3 censored_denoised_bold = [nifti_file] * n_runs preprocessed_bold = [nifti_file] * n_runs - fmriprep_confounds_file = [tsv_file] * n_runs - filtered_motion = [tsv_file] * n_runs + full_confounds = [tsv_file] * n_runs + modified_full_confounds = [tsv_file] * n_runs temporal_mask = [tsv_file] * n_runs denoised_interpolated_bold = [nifti_file] * n_runs @@ -112,8 +112,8 @@ def test_concatenateinputs(ds001419_data, tmp_path_factory): denoised_interpolated_bold=denoised_interpolated_bold, smoothed_denoised_bold=smoothed_denoised_bold, timeseries_ciftis=timeseries_ciftis, - fmriprep_confounds_file=fmriprep_confounds_file, - filtered_motion=filtered_motion, + full_confounds=full_confounds, + modified_full_confounds=modified_full_confounds, temporal_mask=temporal_mask, timeseries=timeseries, ) @@ -125,8 +125,8 @@ def test_concatenateinputs(ds001419_data, tmp_path_factory): assert not isdefined(out.smoothed_denoised_bold) assert len(out.timeseries_ciftis) == n_atlases assert all(os.path.isfile(f) for f in out.timeseries_ciftis) - assert os.path.isfile(out.fmriprep_confounds_file) - assert os.path.isfile(out.filtered_motion) + assert os.path.isfile(out.full_confounds) + assert os.path.isfile(out.modified_full_confounds) assert os.path.isfile(out.temporal_mask) assert len(out.timeseries) == n_atlases assert all(os.path.isfile(f) for f in out.timeseries) diff --git a/xcp_d/tests/test_interfaces_nilearn.py b/xcp_d/tests/test_interfaces_nilearn.py index c5ee9dc89..77606e426 100644 --- a/xcp_d/tests/test_interfaces_nilearn.py +++ b/xcp_d/tests/test_interfaces_nilearn.py @@ -107,8 +107,8 @@ def test_nilearn_denoisenifti(ds001419_data, tmp_path_factory): # Create 
the censoring file censoring_df = confounds_df[["framewise_displacement"]] - censoring_df["framewise_displacement"] = censoring_df["framewise_displacement"] > 0.3 - assert censoring_df["framewise_displacement"].sum() > 0 + censoring_df["denoising"] = censoring_df["framewise_displacement"] > 0.3 + assert censoring_df["denoising"].sum() > 0 temporal_mask = os.path.join(tmpdir, "censoring.tsv") censoring_df.to_csv(temporal_mask, sep="\t", index=False) @@ -116,7 +116,7 @@ def test_nilearn_denoisenifti(ds001419_data, tmp_path_factory): interface = nilearn.DenoiseNifti( preprocessed_bold=preprocessed_bold, - confounds_file=reduced_confounds_file, + design_matrix=reduced_confounds_file, temporal_mask=temporal_mask, mask=mask, TR=2, @@ -145,8 +145,8 @@ def test_nilearn_denoisecifti(ds001419_data, tmp_path_factory): # Create the censoring file censoring_df = confounds_df[["framewise_displacement"]] - censoring_df["framewise_displacement"] = censoring_df["framewise_displacement"] > 0.3 - assert censoring_df["framewise_displacement"].sum() > 0 + censoring_df["denoising"] = censoring_df["framewise_displacement"] > 0.3 + assert censoring_df["denoising"].sum() > 0 temporal_mask = os.path.join(tmpdir, "censoring.tsv") censoring_df.to_csv(temporal_mask, sep="\t", index=False) @@ -154,7 +154,7 @@ def test_nilearn_denoisecifti(ds001419_data, tmp_path_factory): interface = nilearn.DenoiseCifti( preprocessed_bold=preprocessed_bold, - confounds_file=reduced_confounds_file, + design_matrix=reduced_confounds_file, temporal_mask=temporal_mask, TR=2, bandpass_filter=True, diff --git a/xcp_d/tests/test_utils_boilerplate.py b/xcp_d/tests/test_utils_boilerplate.py index 8f8f413dd..fd29572b4 100644 --- a/xcp_d/tests/test_utils_boilerplate.py +++ b/xcp_d/tests/test_utils_boilerplate.py @@ -70,36 +70,61 @@ def test_describe_motion_parameters(): def test_describe_censoring(): """Test boilerplate.describe_censoring.""" - motion_filter_type = "notch" - fd_thresh = 0.2 - exact_scans = [] - desc = boilerplate.describe_censoring(motion_filter_type, fd_thresh, exact_scans) + desc = boilerplate.describe_censoring( + motion_filter_type="notch", + fd_thresh=[0.2, 0.2], + dvars_thresh=[0, 0], + censor_before=[0, 0], + censor_after=[0, 0], + censor_between=[0, 0], + exact_scans=[], + ) assert "Volumes with filtered framewise displacement" in desc - motion_filter_type = None - fd_thresh = 0.2 - exact_scans = [] - desc = boilerplate.describe_censoring(motion_filter_type, fd_thresh, exact_scans) + desc = boilerplate.describe_censoring( + motion_filter_type=None, + fd_thresh=[0.2, 0.2], + dvars_thresh=[0, 0], + censor_before=[0, 0], + censor_after=[0, 0], + censor_between=[0, 0], + exact_scans=[], + ) assert "Volumes with framewise displacement" in desc - motion_filter_type = None - fd_thresh = 0.2 - exact_scans = [100, 200, 300] - desc = boilerplate.describe_censoring(motion_filter_type, fd_thresh, exact_scans) + desc = boilerplate.describe_censoring( + motion_filter_type=None, + fd_thresh=[0.2, 0.2], + dvars_thresh=[0, 0], + censor_before=[0, 0], + censor_after=[0, 0], + censor_between=[0, 0], + exact_scans=[100, 200, 300], + ) assert "Volumes with framewise displacement" in desc assert "limited to 100, 200, and 300 volumes" in desc - motion_filter_type = None - fd_thresh = 0 - exact_scans = [100, 200, 300] - desc = boilerplate.describe_censoring(motion_filter_type, fd_thresh, exact_scans) + desc = boilerplate.describe_censoring( + motion_filter_type=None, + fd_thresh=[0, 0], + dvars_thresh=[0, 0], + censor_before=[0, 0], + 
censor_after=[0, 0], + censor_between=[0, 0], + exact_scans=[100, 200, 300], + ) assert "Volumes were randomly selected for censoring" in desc assert "limited to 100, 200, and 300 volumes" in desc - motion_filter_type = "notch" - fd_thresh = 0 - exact_scans = [100, 200, 300] - desc = boilerplate.describe_censoring(motion_filter_type, fd_thresh, exact_scans) + desc = boilerplate.describe_censoring( + motion_filter_type="notch", + fd_thresh=[0, 0], + dvars_thresh=[0, 0], + censor_before=[0, 0], + censor_after=[0, 0], + censor_between=[0, 0], + exact_scans=[100, 200, 300], + ) assert "Volumes were randomly selected for censoring" in desc assert "limited to 100, 200, and 300 volumes" in desc diff --git a/xcp_d/tests/test_utils_confounds.py b/xcp_d/tests/test_utils_confounds.py index 5f5d934ee..8241ad6f5 100644 --- a/xcp_d/tests/test_utils_confounds.py +++ b/xcp_d/tests/test_utils_confounds.py @@ -310,3 +310,133 @@ def test_motion_filtering_notch(): ) notch_data_test = np.squeeze(notch_data_test) assert np.allclose(notch_data_test, notch_data_true) + + +def test_calculate_outliers(): + """Test confounds.calculate_outliers.""" + fd = np.zeros(20) + dvars = np.zeros(20) + + fd_idx = [(1, 0.4), (5, 0.2)] + for idx, val in fd_idx: + fd[idx] = val + + dvars_idx = [(11, 2.0), (15, 1.0)] + for idx, val in dvars_idx: + dvars[idx] = val + + df = pd.DataFrame({"framewise_displacement": fd, "std_dvars": dvars}) + + # Apply both thresholds without expanded censoring + outliers_df = confounds.calculate_outliers( + confounds=df, + fd_thresh=[0.3, 0.1], + dvars_thresh=[1.5, 0.5], + before=[0, 0], + after=[0, 0], + between=[0, 0], + ) + assert outliers_df.shape[0] == df.shape[0] + assert outliers_df.shape[1] == 6 # 6 types of outlier + assert np.array_equal(np.where(outliers_df["framewise_displacement"].values)[0], [1]) + assert np.array_equal(np.where(outliers_df["dvars"].values)[0], [11]) + assert np.array_equal(np.where(outliers_df["denoising"].values)[0], [1, 11]) + assert np.array_equal( + np.where(outliers_df["framewise_displacement_interpolation"].values)[0], + [1, 5], + ) + assert np.array_equal(np.where(outliers_df["dvars_interpolation"].values)[0], [11, 15]) + assert np.array_equal(np.where(outliers_df["interpolation"].values)[0], [1, 5, 11, 15]) + + # Apply both thresholds and use expanded censoring + outliers_df = confounds.calculate_outliers( + confounds=df, + fd_thresh=[0.3, 0.1], + dvars_thresh=[1.5, 0.5], + before=[1, 1], + after=[1, 1], + between=[1, 1], + ) + assert outliers_df.shape[0] == df.shape[0] + assert outliers_df.shape[1] == 6 # 6 types of outlier + assert np.array_equal(np.where(outliers_df["framewise_displacement"].values)[0], [1]) + assert np.array_equal(np.where(outliers_df["dvars"].values)[0], [11]) + assert np.array_equal(np.where(outliers_df["denoising"].values)[0], [0, 1, 2, 10, 11, 12]) + assert np.array_equal( + np.where(outliers_df["framewise_displacement_interpolation"].values)[0], + [1, 5], + ) + assert np.array_equal(np.where(outliers_df["dvars_interpolation"].values)[0], [11, 15]) + assert np.array_equal( + np.where(outliers_df["interpolation"].values)[0], + [0, 1, 2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 14, 15, 16], + ) + + # Drop FD column and try to apply FD-based censoring + temp_df = df[["std_dvars"]] + with pytest.raises(ValueError, match="FD-based censoring is not possible"): + confounds.calculate_outliers( + confounds=temp_df, + fd_thresh=[0.3, 0.1], + dvars_thresh=[1.5, 0.5], + before=[0, 0], + after=[0, 0], + between=[0, 0], + ) + + # Apply DVARS-based censoring 
when FD column is missing + outliers_df = confounds.calculate_outliers( + confounds=temp_df, + fd_thresh=[0, 0], + dvars_thresh=[1.5, 0.5], + before=[1, 1], + after=[1, 1], + between=[1, 1], + ) + assert outliers_df.shape[0] == df.shape[0] + assert outliers_df.shape[1] == 6 # 6 types of outlier + assert np.array_equal(np.where(outliers_df["framewise_displacement"].values)[0], []) + assert np.array_equal(np.where(outliers_df["dvars"].values)[0], [11]) + assert np.array_equal(np.where(outliers_df["denoising"].values)[0], [10, 11, 12]) + assert np.array_equal( + np.where(outliers_df["framewise_displacement_interpolation"].values)[0], + [], + ) + assert np.array_equal(np.where(outliers_df["dvars_interpolation"].values)[0], [11, 15]) + assert np.array_equal( + np.where(outliers_df["interpolation"].values)[0], + [9, 10, 11, 12, 13, 14, 15, 16], + ) + + # Drop DVARS column and try to apply DVARS-based censoring + temp_df = df[["framewise_displacement"]] + with pytest.raises(ValueError, match="DVARS-based censoring is not possible"): + confounds.calculate_outliers( + confounds=temp_df, + fd_thresh=[0.3, 0.1], + dvars_thresh=[1.5, 0.5], + before=[0, 0], + after=[0, 0], + between=[0, 0], + ) + + # Apply FD-based censoring when DVARS column is missing + outliers_df = confounds.calculate_outliers( + confounds=temp_df, + fd_thresh=[0.3, 0.1], + dvars_thresh=[0, 0], + before=[1, 1], + after=[1, 1], + between=[1, 1], + ) + assert outliers_df.shape[0] == df.shape[0] + assert outliers_df.shape[1] == 6 # 6 types of outlier + assert np.array_equal(np.where(outliers_df["framewise_displacement"].values)[0], [1]) + assert np.array_equal(np.where(outliers_df["dvars"].values)[0], []) + assert np.array_equal(np.where(outliers_df["denoising"].values)[0], [0, 1, 2]) + assert np.array_equal( + np.where(outliers_df["framewise_displacement_interpolation"].values)[0], + [1, 5], + ) + assert np.array_equal(np.where(outliers_df["dvars_interpolation"].values)[0], []) + assert np.array_equal(np.where(outliers_df["interpolation"].values)[0], [0, 1, 2, 3, 4, 5, 6]) diff --git a/xcp_d/tests/test_utils_plotting.py b/xcp_d/tests/test_utils_plotting.py index 037ea54d4..8432871bb 100644 --- a/xcp_d/tests/test_utils_plotting.py +++ b/xcp_d/tests/test_utils_plotting.py @@ -16,23 +16,31 @@ def test_plot_fmri_es(ds001419_data, tmp_path_factory): denoised_interpolated_bold = ds001419_data["cifti_file"] # Using unfiltered FD instead of calculating filtered version. 
- filtered_motion = ds001419_data["confounds_file"] + modified_full_confounds = ds001419_data["confounds_file"] preprocessed_figure = os.path.join(tmpdir, "unprocessed.svg") denoised_figure = os.path.join(tmpdir, "processed.svg") t_r = 2 - n_volumes = pd.read_table(filtered_motion).shape[0] - tmask_arr = np.zeros(n_volumes, dtype=bool) - tmask_arr[:10] = True # flag first 10 volumes as bad + n_volumes = pd.read_table(modified_full_confounds).shape[0] + tmask_arr = np.zeros((n_volumes, 6), dtype=bool) + tmask_arr[:10, :] = True # flag first 10 volumes as bad tmask_arr = tmask_arr.astype(int) temporal_mask = os.path.join(tmpdir, "temporal_mask.tsv") - pd.DataFrame(columns=["framewise_displacement"], data=tmask_arr).to_csv( - temporal_mask, sep="\t", index=False - ) + pd.DataFrame( + data=tmask_arr, + columns=[ + "framewise_displacement", + "dvars", + "denoising", + "framewise_displacement_interpolation", + "dvars_interpolation", + "interpolation", + ], + ).to_csv(temporal_mask, sep="\t", index=False) out_file1, out_file2 = plotting.plot_fmri_es( preprocessed_bold=preprocessed_bold, denoised_interpolated_bold=denoised_interpolated_bold, - filtered_motion=filtered_motion, + modified_full_confounds=modified_full_confounds, preprocessed_figure=preprocessed_figure, denoised_figure=denoised_figure, TR=t_r, @@ -49,7 +57,7 @@ def test_plot_fmri_es(ds001419_data, tmp_path_factory): out_file1, out_file2 = plotting.plot_fmri_es( preprocessed_bold=preprocessed_bold, denoised_interpolated_bold=denoised_interpolated_bold, - filtered_motion=filtered_motion, + modified_full_confounds=modified_full_confounds, preprocessed_figure=preprocessed_figure, denoised_figure=denoised_figure, TR=t_r, diff --git a/xcp_d/tests/test_workflows_connectivity.py b/xcp_d/tests/test_workflows_connectivity.py index 36e01d2ad..edd45843e 100644 --- a/xcp_d/tests/test_workflows_connectivity.py +++ b/xcp_d/tests/test_workflows_connectivity.py @@ -98,12 +98,19 @@ def test_init_functional_connectivity_nifti_wf(ds001419_data, tmp_path_factory): # Create a fake temporal mask to satisfy the workflow n_volumes = bold_data.shape[1] + data = np.zeros((n_volumes, 7)) + data[:10, -1] = 1 censoring_df = pd.DataFrame( - columns=["framewise_displacement", "exact_10"], - data=np.stack( - (np.zeros(n_volumes), np.concatenate((np.ones(10), np.zeros(n_volumes - 10)))), - axis=1, - ), + columns=[ + "framewise_displacement", + "dvars", + "denoising", + "framewise_displacement_interpolation", + "dvars_interpolation", + "interpolation", + "exact_10", + ], + data=data, ) temporal_mask = os.path.join(tmpdir, "temporal_mask.tsv") censoring_df.to_csv(temporal_mask, sep="\t", index=False) @@ -141,6 +148,7 @@ def test_init_functional_connectivity_nifti_wf(ds001419_data, tmp_path_factory): config.workflow.bandpass_filter = False config.workflow.min_coverage = 0.5 config.nipype.omp_nthreads = 2 + config.execution.atlases = atlas_names connectivity_wf = init_functional_connectivity_nifti_wf( mem_gb=mem_gbx, @@ -246,12 +254,19 @@ def test_init_functional_connectivity_cifti_wf(ds001419_data, tmp_path_factory): # Create a fake temporal mask to satisfy the workflow n_volumes = bold_data.shape[1] + data = np.zeros((n_volumes, 7)) + data[:10, -1] = 1 censoring_df = pd.DataFrame( - columns=["framewise_displacement", "exact_10"], - data=np.stack( - (np.zeros(n_volumes), np.concatenate((np.ones(10), np.zeros(n_volumes - 10)))), - axis=1, - ), + columns=[ + "framewise_displacement", + "dvars", + "denoising", + "framewise_displacement_interpolation", + 
"dvars_interpolation", + "interpolation", + "exact_10", + ], + data=data, ) temporal_mask = os.path.join(tmpdir, "temporal_mask.tsv") censoring_df.to_csv(temporal_mask, sep="\t", index=False) diff --git a/xcp_d/tests/test_workflows_restingstate.py b/xcp_d/tests/test_workflows_restingstate.py index 5177819e5..4dfb8831a 100644 --- a/xcp_d/tests/test_workflows_restingstate.py +++ b/xcp_d/tests/test_workflows_restingstate.py @@ -37,7 +37,8 @@ def test_nifti_alff(ds001419_data, tmp_path_factory): config.workflow.cifti = False config.workflow.low_pass = 0.08 config.workflow.high_pass = 0.01 - config.workflow.fd_thresh = 0 + config.workflow.fd_thresh = [0, 0] + config.workflow.dvars_thresh = [0, 0] config.workflow.smoothing = 6 config.nipype.omp_nthreads = 2 @@ -124,7 +125,8 @@ def test_cifti_alff(ds001419_data, tmp_path_factory): config.workflow.cifti = True config.workflow.low_pass = 0.08 config.workflow.high_pass = 0.01 - config.workflow.fd_thresh = 0.1 + config.workflow.fd_thresh = [0.1, 0] + config.workflow.dvars_thresh = [0, 0] config.workflow.smoothing = 6 config.nipype.omp_nthreads = 2 diff --git a/xcp_d/utils/boilerplate.py b/xcp_d/utils/boilerplate.py index 6b68d45ad..cace74619 100644 --- a/xcp_d/utils/boilerplate.py +++ b/xcp_d/utils/boilerplate.py @@ -91,13 +91,26 @@ def describe_motion_parameters( @fill_doc -def describe_censoring(motion_filter_type, fd_thresh, exact_scans): +def describe_censoring( + *, + motion_filter_type, + fd_thresh, + dvars_thresh, + censor_before, + censor_after, + censor_between, + exact_scans, +): """Build a text description of the FD censoring process. Parameters ---------- %(motion_filter_type)s %(fd_thresh)s + dvars_thresh + censor_before + censor_after + censor_between %(exact_scans)s Returns @@ -106,14 +119,48 @@ def describe_censoring(motion_filter_type, fd_thresh, exact_scans): A text description of the censoring procedure. """ desc = "" - if fd_thresh > 0: - desc += ( - f"Volumes with {'filtered ' if motion_filter_type else ''}framewise displacement " - f"greater than {fd_thresh} mm were flagged as high-motion outliers for the sake of " - "later censoring [@power_fd_dvars]." - ) + censor = any(t > 0 for t in fd_thresh + dvars_thresh) + if censor: + if fd_thresh[0] > 0 or dvars_thresh[0] > 0: + desc += ( + f"Volumes with {'filtered ' if motion_filter_type else ''}framewise displacement " + f"greater than {fd_thresh[0]} mm or DVARS greater than {dvars_thresh[0]} " + "were flagged as outliers for censoring as part of the denoising step " + "[@power_fd_dvars]. " + ) + if censor_before[0] > 0 or censor_after[0] > 0: + desc += ( + f"Additionally, {censor_before[0]} volumes before and {censor_after[0]} " + "volumes after each outlier were flagged for censoring. " + ) + + if censor_between[0] > 0: + desc += ( + "Any sequences of non-outlier volumes shorter than or equal to " + f"{censor_between[0]} volumes were also flagged for censoring. " + ) + + if fd_thresh[1] > 0 or dvars_thresh[1] > 0: + desc += ( + f"Volumes with {'filtered ' if motion_filter_type else ''}framewise displacement " + f"greater than {fd_thresh[1]} mm or DVARS greater than {dvars_thresh[1]} " + "were flagged as outliers for censoring after interpolation and temporal " + "filtering. " + ) + + if censor_before[1] > 0 or censor_after[1] > 0: + desc += ( + f"Additionally, {censor_before[1]} volumes before and {censor_after[1]} " + "volumes after each outlier were flagged for censoring. 
" + ) + + if censor_between[1] > 0: + desc += ( + "Any sequences of non-outlier volumes shorter than or equal to " + f"{censor_between[1]} volumes were also flagged for censoring. " + ) - if exact_scans and (fd_thresh > 0): + if exact_scans and censor: desc += ( " Additional sets of censoring volumes were randomly selected to produce additional " f"correlation matrices limited to {list_to_str(exact_scans)} volumes." diff --git a/xcp_d/utils/confounds.py b/xcp_d/utils/confounds.py index 28f645488..a17cee9be 100644 --- a/xcp_d/utils/confounds.py +++ b/xcp_d/utils/confounds.py @@ -9,6 +9,7 @@ import pandas as pd from nilearn.interfaces.fmriprep.load_confounds import _load_single_confounds_file from nipype import logging +from scipy import ndimage from scipy.signal import butter, filtfilt, iirnotch from xcp_d.utils.doc import fill_doc @@ -17,7 +18,7 @@ @fill_doc -def load_motion( +def filter_motion( confounds_df, TR, motion_filter_type=None, @@ -41,7 +42,7 @@ def load_motion( Returns ------- - motion_confounds_df : pandas.DataFrame + confounds_df : pandas.DataFrame The six motion regressors. The three rotations are listed first, then the three translations. @@ -52,50 +53,44 @@ def load_motion( if motion_filter_type not in ("lp", "notch", None): raise ValueError(f"Motion filter type '{motion_filter_type}' not supported.") - # Select the motion columns from the overall confounds DataFrame - motion_confounds_df = confounds_df[ - ["rot_x", "rot_y", "rot_z", "trans_x", "trans_y", "trans_z"] - ] + confounds_df = confounds_df.copy() + motion_columns = ["rot_x", "rot_y", "rot_z", "trans_x", "trans_y", "trans_z"] # Apply LP or notch filter if motion_filter_type in ("lp", "notch"): - motion_confounds = motion_regression_filter( - data=motion_confounds_df.to_numpy(), + confounds_df[motion_columns] = motion_regression_filter( + data=confounds_df[motion_columns].to_numpy(), TR=TR, motion_filter_type=motion_filter_type, band_stop_min=band_stop_min, band_stop_max=band_stop_max, motion_filter_order=motion_filter_order, ) - motion_confounds_df = pd.DataFrame( - data=motion_confounds, - columns=motion_confounds_df.columns, - ) # Volterra expansion # Ignore pandas SettingWithCopyWarning with pd.option_context("mode.chained_assignment", None): - columns = motion_confounds_df.columns.tolist() - for col in columns: - new_col = f"{col}_derivative1" - motion_confounds_df[new_col] = motion_confounds_df[col].diff() + columns_to_square = motion_columns[:] + for col in motion_columns: + deriv_col = f"{col}_derivative1" + confounds_df[deriv_col] = confounds_df[col].diff() + columns_to_square.append(deriv_col) - columns = motion_confounds_df.columns.tolist() - for col in columns: - new_col = f"{col}_power2" - motion_confounds_df[new_col] = motion_confounds_df[col] ** 2 + for col in columns_to_square: + square_col = f"{col}_power2" + confounds_df[square_col] = confounds_df[col] ** 2 - return motion_confounds_df + return confounds_df @fill_doc -def get_custom_confounds(custom_confounds_folder, fmriprep_confounds_file): +def get_custom_confounds(custom_confounds_folder, full_confounds): """Identify a custom confounds file. Parameters ---------- %(custom_confounds_folder)s - %(fmriprep_confounds_file)s + full_confounds We expect the custom confounds file to have the same name. 
    Returns
@@ -112,7 +107,7 @@ def get_custom_confounds(custom_confounds_folder, fmriprep_confounds_file):
            f"Custom confounds location does not exist: {custom_confounds_folder}"
        )

-    custom_confounds_filename = os.path.basename(fmriprep_confounds_file)
+    custom_confounds_filename = os.path.basename(full_confounds)
     custom_confounds_file = os.path.abspath(
         os.path.join(
             custom_confounds_folder,
@@ -553,3 +548,163 @@ def _infer_dummy_scans(dummy_scans, confounds_file=None):
            dummy_scans = 0

    return dummy_scans
+
+
+def censor_basic(array, threshold):
+    """Flag volumes whose metric value exceeds the threshold, treating NaNs as non-outliers."""
+    array[np.isnan(array)] = 0
+    if threshold > 0:
+        outlier_mask = array > threshold
+    else:
+        outlier_mask = np.zeros_like(array, dtype=bool)
+
+    return outlier_mask
+
+
+def censor_around(outlier_mask, before, after, between):
+    """Censor volumes adjacent to outliers.
+
+    Parameters
+    ----------
+    outlier_mask : array
+        Boolean array where True indicates an outlier.
+    before : int
+        Number of volumes to censor before each outlier.
+    after : int
+        Number of volumes to censor after each outlier.
+    between : int
+        Maximum length of contiguous non-outlier runs to censor; runs with this many volumes or fewer are flagged.
+
+    Returns
+    -------
+    outlier_mask : array
+        Boolean array with additional volumes censored.
+    """
+    # Censoring before and after outliers
+    n_vols = len(outlier_mask)
+    outlier_mask = outlier_mask.astype(bool).copy()
+    outliers_idx = np.where(outlier_mask)[0]  # index untouched by expansion
+    for i in outliers_idx:
+        outlier_mask[max(0, i - before) : i] = True
+        outlier_mask[i + 1 : min(i + 1 + after, n_vols)] = True
+
+    # Find any contiguous sequences of 0s and censor them if the length of the sequence is
+    # less than or equal to the censor_between value.
+    # Find all contiguous sequences of Falses
+    labeled_array, n_uncensored_groups = ndimage.label(~outlier_mask)
+
+    # Iterate over each contiguous sequence
+    for i in range(1, n_uncensored_groups + 1):
+        slice_ = ndimage.find_objects(labeled_array == i)[0][0]
+        length = slice_.stop - slice_.start
+
+        # If the length of the contiguous sequence is less than or equal to censor_between,
+        # set the values to True
+        if length <= between:
+            outlier_mask[slice_] = True
+
+    return outlier_mask
+
+
+def calculate_outliers(*, confounds, fd_thresh, dvars_thresh, before, after, between):
+    """Generate a temporal mask DataFrame.
+
+    Parameters
+    ----------
+    confounds : pandas.DataFrame
+        DataFrame with confounds.
+        The two columns that will be used are "framewise_displacement" and "std_dvars".
+    fd_thresh : tuple
+        Tuple of two floats. The first value is the threshold for censoring during denoising,
+        and the second value is the threshold for censoring during interpolation.
+    dvars_thresh : tuple
+        Tuple of two floats. The first value is the threshold for censoring during denoising,
+        and the second value is the threshold for censoring during interpolation.
+    before : tuple
+        Tuple of two integers. The first value is the number of volumes to censor before each
+        FD or DVARS outlier volume during denoising, and the second value is the number of volumes
+        to censor before each FD or DVARS outlier volume during interpolation.
+    after : tuple
+        Tuple of two integers. The first value is the number of volumes to censor after each
+        FD or DVARS outlier volume during denoising, and the second value is the number of volumes
+        to censor after each FD or DVARS outlier volume during interpolation.
+    between : tuple
+        Tuple of two integers giving the maximum length of contiguous non-outlier runs to censor:
+        the first value applies to censoring for denoising, and the second to censoring for
+        interpolation. Runs with that many volumes or fewer are flagged.
+
+    Returns
+    -------
+    outliers_df : pandas.DataFrame
+        DataFrame with columns for each type of censoring.
+    """
+    # Generate a temporal mask in which all timepoints with FD/DVARS over threshold are set to 1.
+    n_vols = confounds.shape[0]
+
+    # Grab FD time series
+    if (
+        fd_thresh[0] > 0 or fd_thresh[1] > 0
+    ) and "framewise_displacement" not in confounds.columns:
+        raise ValueError(
+            "The 'framewise_displacement' column is missing from the fMRIPrep confounds file. "
+            "FD-based censoring is not possible."
+        )
+    elif "framewise_displacement" in confounds.columns:
+        fd_timeseries = confounds["framewise_displacement"].to_numpy()
+    else:
+        # Create a fake array that won't actually be used
+        fd_timeseries = np.zeros(n_vols)
+
+    # Grab DVARS time series
+    if (dvars_thresh[0] > 0 or dvars_thresh[1] > 0) and "std_dvars" not in confounds.columns:
+        raise ValueError(
+            "The 'std_dvars' column is missing from the fMRIPrep confounds file. "
+            "DVARS-based censoring is not possible."
+        )
+    elif "std_dvars" in confounds.columns:
+        dvars_timeseries = confounds["std_dvars"].to_numpy()
+    else:
+        # Create a fake array that won't actually be used
+        dvars_timeseries = np.zeros(n_vols)
+
+    outliers_df = pd.DataFrame(
+        columns=[
+            "framewise_displacement",
+            "dvars",
+            "denoising",
+            "framewise_displacement_interpolation",
+            "dvars_interpolation",
+            "interpolation",
+        ],
+        data=np.zeros((n_vols, 6), dtype=bool),
+    )

+    # Censoring for denoising
+    outliers_df["framewise_displacement"] = censor_basic(fd_timeseries, fd_thresh[0])
+    outliers_df["dvars"] = censor_basic(dvars_timeseries, dvars_thresh[0])
+
+    outlier_mask = np.logical_or(outliers_df["framewise_displacement"], outliers_df["dvars"])
+    outliers_df["denoising"] = censor_around(
+        outlier_mask,
+        before=before[0],
+        after=after[0],
+        between=between[0],
+    )
+
+    # Censoring for interpolated data (to remove interpolated signals that might be spread to
+    # other volumes by the temporal filter)
+    outliers_df["framewise_displacement_interpolation"] = censor_basic(fd_timeseries, fd_thresh[1])
+    outliers_df["dvars_interpolation"] = censor_basic(dvars_timeseries, dvars_thresh[1])
+
+    # The interpolation outlier index should include all outliers from the denoising index,
+    # because all volumes in the denoising index are interpolated in the denoising step.
+    outlier_mask = outliers_df[
+        ["denoising", "framewise_displacement_interpolation", "dvars_interpolation"]
+    ].any(axis=1)
+    outliers_df["interpolation"] = censor_around(
+        outlier_mask,
+        before=before[1],
+        after=after[1],
+        between=between[1],
+    )
+    return outliers_df
diff --git a/xcp_d/utils/doc.py b/xcp_d/utils/doc.py
index 598bf8017..2114c279a 100644
--- a/xcp_d/utils/doc.py
+++ b/xcp_d/utils/doc.py
@@ -101,9 +101,9 @@
 """

 docdict[
-    "fmriprep_confounds_file"
+    "full_confounds"
 ] = """
-fmriprep_confounds_file : :obj:`str`
+full_confounds : :obj:`str`
     Confounds TSV file from preprocessing derivatives.
 """

@@ -430,9 +430,9 @@
 """

 docdict[
-    "filtered_motion"
+    "modified_full_confounds"
 ] = """
-filtered_motion : :obj:`str`
+modified_full_confounds : :obj:`str`
     Framewise displacement timeseries, potentially after bandstop or low-pass filtering.
     This is a TSV file with one column: 'framewise_displacement'.
 """
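To make the expansion rules concrete, here is a small worked example of censor_around() from the confounds.py hunk above, using arbitrarily chosen values. It assumes only the function as added in this patch.

import numpy as np

from xcp_d.utils.confounds import censor_around

mask = np.zeros(12, dtype=bool)
mask[[3, 8]] = True  # two outlier volumes

# Flag one volume on each side of every outlier, and censor any run of
# contiguous non-outliers that is one volume long or shorter.
out = censor_around(mask, before=1, after=1, between=1)
print(np.where(out)[0])  # [2 3 4 7 8 9]

# With between=2, the remaining two-volume runs (0-1, 5-6, and 10-11) would
# also be flagged, censoring the entire series.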
""" diff --git a/xcp_d/utils/modified_data.py b/xcp_d/utils/modified_data.py index 9efb2c93c..9c4f8f833 100644 --- a/xcp_d/utils/modified_data.py +++ b/xcp_d/utils/modified_data.py @@ -8,7 +8,12 @@ import pandas as pd from nipype import logging -from xcp_d.utils.confounds import _infer_dummy_scans, _modify_motion_filter, load_motion +from xcp_d.utils.confounds import ( + _infer_dummy_scans, + _modify_motion_filter, + calculate_outliers, + filter_motion, +) from xcp_d.utils.doc import fill_doc from xcp_d.utils.filemanip import fname_presuffix @@ -137,7 +142,7 @@ def downcast_to_32(in_file): @fill_doc def flag_bad_run( - fmriprep_confounds_file, + full_confounds, dummy_scans, TR, motion_filter_type, @@ -146,12 +151,16 @@ def flag_bad_run( band_stop_max, head_radius, fd_thresh, + dvars_thresh, + censor_before, + censor_after, + censor_between, ): """Determine if a run has too many high-motion volumes to continue processing. Parameters ---------- - %(fmriprep_confounds_file)s + full_confounds %(dummy_scans)s %(TR)s %(motion_filter_type)s @@ -160,36 +169,40 @@ def flag_bad_run( %(band_stop_max)s %(head_radius)s %(fd_thresh)s - brain_mask + dvars_thresh + censor_before + censor_after + censor_between Returns ------- post_scrubbing_duration : :obj:`float` Amount of time remaining in the run after dummy scan removal, in seconds. """ - if fd_thresh <= 0: + nocensor = all(t <= 0 for t in fd_thresh + dvars_thresh) + if nocensor: # No scrubbing will be performed, so there's no point is calculating amount of "good time". return np.inf dummy_scans = _infer_dummy_scans( dummy_scans=dummy_scans, - confounds_file=fmriprep_confounds_file, + confounds_file=full_confounds, ) # Read in fmriprep confounds tsv to calculate FD - fmriprep_confounds_df = pd.read_table(fmriprep_confounds_file) + fmriprep_confounds_df = pd.read_table(full_confounds) # Remove dummy volumes fmriprep_confounds_df = fmriprep_confounds_df.drop(np.arange(dummy_scans)) - # Calculate filtered FD + # Calculate filtered FD and patch it into the confounds file band_stop_min_adjusted, band_stop_max_adjusted, _ = _modify_motion_filter( motion_filter_type=motion_filter_type, band_stop_min=band_stop_min, band_stop_max=band_stop_max, TR=TR, ) - motion_df = load_motion( + motion_df = filter_motion( fmriprep_confounds_df, TR=TR, motion_filter_type=motion_filter_type, @@ -197,5 +210,19 @@ def flag_bad_run( band_stop_min=band_stop_min_adjusted, band_stop_max=band_stop_max_adjusted, ) - fd_arr = compute_fd(confound=motion_df, head_radius=head_radius) - return np.sum(fd_arr <= fd_thresh) * TR + fmriprep_confounds_df["framewise_displacement"] = compute_fd( + confound=motion_df, + head_radius=head_radius, + ) + + # Calculate outliers to determine the post-censoring duration + outliers_df = calculate_outliers( + confounds=fmriprep_confounds_df, + fd_thresh=fd_thresh, + dvars_thresh=dvars_thresh, + before=censor_before, + after=censor_after, + between=censor_between, + ) + keep_arr = ~outliers_df["interpolation"].astype(bool).values + return np.sum(keep_arr) * TR diff --git a/xcp_d/utils/plotting.py b/xcp_d/utils/plotting.py index 354373282..d44c8333b 100644 --- a/xcp_d/utils/plotting.py +++ b/xcp_d/utils/plotting.py @@ -483,7 +483,7 @@ def plot_fmri_es( preprocessed_bold, denoised_interpolated_bold, TR, - filtered_motion, + modified_full_confounds, temporal_mask, preprocessed_figure, denoised_figure, @@ -501,7 +501,7 @@ def plot_fmri_es( Preprocessed BOLD file, dummy scan removal. 
%(denoised_interpolated_bold)s %(TR)s - %(filtered_motion)s + %(modified_full_confounds)s %(temporal_mask)s Only non-outlier (low-motion) volumes in the temporal mask will be used to scale the carpet plot. @@ -548,9 +548,9 @@ def plot_fmri_es( } ) - fd_regressor = pd.read_table(filtered_motion)["framewise_displacement"].values + fd_regressor = pd.read_table(modified_full_confounds)["framewise_displacement"].values if temporal_mask: - tmask_arr = pd.read_table(temporal_mask)["framewise_displacement"].values.astype(bool) + tmask_arr = pd.read_table(temporal_mask)["denoising"].values.astype(bool) else: tmask_arr = np.zeros(fd_regressor.shape, dtype=bool) @@ -1112,12 +1112,12 @@ def plot_design_matrix(design_matrix, temporal_mask=None): design_matrix_df = pd.read_table(design_matrix) if temporal_mask: censoring_df = pd.read_table(temporal_mask) - n_motion_outliers = censoring_df["framewise_displacement"].sum() + n_motion_outliers = censoring_df["denoising"].sum() motion_outliers_df = pd.DataFrame( data=np.zeros((censoring_df.shape[0], n_motion_outliers), dtype=np.int16), columns=[f"outlier{i}" for i in range(1, n_motion_outliers + 1)], ) - motion_outlier_idx = np.where(censoring_df["framewise_displacement"])[0] + motion_outlier_idx = np.where(censoring_df["denoising"])[0] for i_outlier, outlier_col in enumerate(motion_outliers_df.columns): outlier_row = motion_outlier_idx[i_outlier] motion_outliers_df.loc[outlier_row, outlier_col] = 1 diff --git a/xcp_d/utils/qcmetrics.py b/xcp_d/utils/qcmetrics.py index 33436be3e..61d1f96c5 100644 --- a/xcp_d/utils/qcmetrics.py +++ b/xcp_d/utils/qcmetrics.py @@ -292,7 +292,7 @@ def compute_dvars( return dvars_nstd, dvars_stdz -def make_dcan_qc_file(filtered_motion, TR): +def make_dcan_qc_file(modified_full_confounds, TR): """Make DCAN HDF5 file from single motion file. NOTE: This is a Node function. @@ -315,17 +315,17 @@ def make_dcan_qc_file(filtered_motion, TR): dcan_df_file = os.path.abspath("desc-dcan_qc.hdf5") - make_dcan_df(filtered_motion, dcan_df_file, TR) + make_dcan_df(modified_full_confounds, dcan_df_file, TR) return dcan_df_file @fill_doc -def make_dcan_df(filtered_motion, name, TR): +def make_dcan_df(modified_full_confounds, name, TR): """Create an HDF5-format file containing a DCAN-format dataset. Parameters ---------- - %(filtered_motion)s + %(modified_full_confounds)s name : :obj:`str` Name of the HDF5-format file to be created. 
%(TR)s @@ -352,7 +352,7 @@ def make_dcan_df(filtered_motion, name, TR): LOGGER.debug(f"Generating DCAN file: {name}") # Load filtered framewise_displacement values from file - filtered_motion_df = pd.read_table(filtered_motion) + filtered_motion_df = pd.read_table(modified_full_confounds) fd = filtered_motion_df["framewise_displacement"].values with h5py.File(name, "w") as dcan: diff --git a/xcp_d/workflows/base.py b/xcp_d/workflows/base.py index 2a809a7a5..55f78f48c 100644 --- a/xcp_d/workflows/base.py +++ b/xcp_d/workflows/base.py @@ -370,8 +370,8 @@ def init_single_subject_wf(subject_id: str): merge_elements = [ "name_source", "preprocessed_bold", - "fmriprep_confounds_file", - "filtered_motion", + "full_confounds", + "modified_full_confounds", "temporal_mask", "denoised_interpolated_bold", "censored_denoised_bold", @@ -398,7 +398,7 @@ def init_single_subject_wf(subject_id: str): ) post_scrubbing_duration = flag_bad_run( - fmriprep_confounds_file=run_data["confounds"], + full_confounds=run_data["confounds"], dummy_scans=config.workflow.dummy_scans, TR=run_data["bold_metadata"]["RepetitionTime"], motion_filter_type=config.workflow.motion_filter_type, @@ -407,6 +407,10 @@ def init_single_subject_wf(subject_id: str): band_stop_max=config.workflow.band_stop_max, head_radius=head_radius, fd_thresh=config.workflow.fd_thresh, + dvars_thresh=config.workflow.dvars_thresh, + censor_before=config.workflow.censor_before, + censor_after=config.workflow.censor_after, + censor_between=config.workflow.censor_between, ) # Reduce exact_times to only include values greater than the post-scrubbing duration. if (config.workflow.min_time >= 0) and ( diff --git a/xcp_d/workflows/bold.py b/xcp_d/workflows/bold.py index 4d6b1e3b7..87d318ab3 100644 --- a/xcp_d/workflows/bold.py +++ b/xcp_d/workflows/bold.py @@ -113,8 +113,8 @@ def init_postprocess_nifti_wf( anat_brainmask T1w brain mask, used for transforms in the QC report workflow. Fed from the subject workflow. - %(fmriprep_confounds_file)s - fmriprep_confounds_json + full_confounds + full_confounds_json %(dummy_scans)s Outputs @@ -122,9 +122,9 @@ def init_postprocess_nifti_wf( %(name_source)s preprocessed_bold : :obj:`str` The preprocessed BOLD file, after dummy scan removal. - %(filtered_motion)s + modified_full_confounds %(temporal_mask)s - %(fmriprep_confounds_file)s + full_confounds After dummy scan removal. 
%(denoised_interpolated_bold)s %(smoothed_denoised_bold)s @@ -160,8 +160,8 @@ def init_postprocess_nifti_wf( "t1w", "t2w", "anat_brainmask", - "fmriprep_confounds_file", - "fmriprep_confounds_json", + "full_confounds", + "full_confounds_json", "dummy_scans", # if parcellation is performed "atlases", @@ -175,8 +175,8 @@ def init_postprocess_nifti_wf( inputnode.inputs.bold_file = bold_file inputnode.inputs.boldref = run_data["boldref"] inputnode.inputs.bold_mask = run_data["boldmask"] - inputnode.inputs.fmriprep_confounds_file = run_data["confounds"] - inputnode.inputs.fmriprep_confounds_json = run_data["confounds_json"] + inputnode.inputs.full_confounds = run_data["confounds"] + inputnode.inputs.full_confounds_json = run_data["confounds_json"] inputnode.inputs.dummy_scans = dummy_scans inputnode.inputs.atlases = atlases @@ -200,8 +200,8 @@ def init_postprocess_nifti_wf( fields=[ "name_source", "preprocessed_bold", - "fmriprep_confounds_file", - "filtered_motion", + "full_confounds", + "modified_full_confounds", "temporal_mask", "denoised_interpolated_bold", "censored_denoised_bold", @@ -248,12 +248,12 @@ def init_postprocess_nifti_wf( workflow.connect([ (inputnode, prepare_confounds_wf, [ ("bold_file", "inputnode.name_source"), - ("fmriprep_confounds_file", "inputnode.fmriprep_confounds_file"), - ("fmriprep_confounds_json", "inputnode.fmriprep_confounds_json"), + ("full_confounds", "inputnode.full_confounds"), + ("full_confounds_json", "inputnode.full_confounds_json"), ]), (downcast_data, prepare_confounds_wf, [("bold_file", "inputnode.preprocessed_bold")]), (prepare_confounds_wf, outputnode, [ - ("outputnode.fmriprep_confounds_file", "fmriprep_confounds_file"), + ("outputnode.full_confounds", "full_confounds"), ("outputnode.preprocessed_bold", "preprocessed_bold"), ]), ]) # fmt:skip @@ -264,7 +264,7 @@ def init_postprocess_nifti_wf( (downcast_data, denoise_bold_wf, [("bold_mask", "inputnode.mask")]), (prepare_confounds_wf, denoise_bold_wf, [ ("outputnode.temporal_mask", "inputnode.temporal_mask"), - ("outputnode.confounds_file", "inputnode.confounds_file"), + ("outputnode.design_matrix", "inputnode.design_matrix"), ]), ]) # fmt:skip @@ -326,9 +326,9 @@ def init_postprocess_nifti_wf( (prepare_confounds_wf, qc_report_wf, [ ("outputnode.preprocessed_bold", "inputnode.preprocessed_bold"), ("outputnode.dummy_scans", "inputnode.dummy_scans"), - ("outputnode.fmriprep_confounds_file", "inputnode.fmriprep_confounds_file"), + ("outputnode.full_confounds", "inputnode.full_confounds"), ("outputnode.temporal_mask", "inputnode.temporal_mask"), - ("outputnode.filtered_motion", "inputnode.filtered_motion"), + ("outputnode.modified_full_confounds", "inputnode.modified_full_confounds"), ]), (denoise_bold_wf, qc_report_wf, [ ("outputnode.denoised_interpolated_bold", "inputnode.denoised_interpolated_bold"), @@ -345,14 +345,17 @@ def init_postprocess_nifti_wf( workflow.connect([ (inputnode, postproc_derivatives_wf, [ - ("fmriprep_confounds_file", "inputnode.fmriprep_confounds_file"), + ("full_confounds", "inputnode.full_confounds"), ("atlas_files", "inputnode.atlas_files"), ]), (prepare_confounds_wf, postproc_derivatives_wf, [ - ("outputnode.confounds_file", "inputnode.confounds_file"), - ("outputnode.confounds_metadata", "inputnode.confounds_metadata"), - ("outputnode.filtered_motion", "inputnode.filtered_motion"), - ("outputnode.motion_metadata", "inputnode.motion_metadata"), + ("outputnode.design_matrix", "inputnode.design_matrix"), + ("outputnode.design_matrix_metadata", 
"inputnode.design_matrix_metadata"), + ("outputnode.modified_full_confounds", "inputnode.modified_full_confounds"), + ( + "outputnode.modified_full_confounds_metadata", + "inputnode.modified_full_confounds_metadata", + ), ("outputnode.temporal_mask", "inputnode.temporal_mask"), ("outputnode.temporal_mask_metadata", "inputnode.temporal_mask_metadata"), ]), @@ -364,7 +367,7 @@ def init_postprocess_nifti_wf( (qc_report_wf, postproc_derivatives_wf, [("outputnode.qc_file", "inputnode.qc_file")]), (reho_wf, postproc_derivatives_wf, [("outputnode.reho", "inputnode.reho")]), (postproc_derivatives_wf, outputnode, [ - ("outputnode.filtered_motion", "filtered_motion"), + ("outputnode.modified_full_confounds", "modified_full_confounds"), ("outputnode.temporal_mask", "temporal_mask"), ("outputnode.denoised_interpolated_bold", "denoised_interpolated_bold"), ("outputnode.censored_denoised_bold", "censored_denoised_bold"), diff --git a/xcp_d/workflows/cifti.py b/xcp_d/workflows/cifti.py index be535ee6c..a47b3c9b5 100644 --- a/xcp_d/workflows/cifti.py +++ b/xcp_d/workflows/cifti.py @@ -102,8 +102,8 @@ def init_postprocess_cifti_wf( t2w Preprocessed T2w image, warped to standard space. Fed from the subject workflow. - %(fmriprep_confounds_file)s - fmriprep_confounds_json + full_confounds + full_confounds_json %(dummy_scans)s Outputs @@ -111,9 +111,9 @@ def init_postprocess_cifti_wf( %(name_source)s preprocessed_bold : :obj:`str` The preprocessed BOLD file, after dummy scan removal. - %(fmriprep_confounds_file)s + full_confounds After dummy scan removal. - %(filtered_motion)s + modified_full_confounds %(temporal_mask)s %(denoised_interpolated_bold)s %(censored_denoised_bold)s @@ -147,8 +147,8 @@ def init_postprocess_cifti_wf( "custom_confounds_file", "t1w", "t2w", - "fmriprep_confounds_file", - "fmriprep_confounds_json", + "full_confounds", + "full_confounds_json", "dummy_scans", # if parcellation is performed "atlases", @@ -161,8 +161,8 @@ def init_postprocess_cifti_wf( inputnode.inputs.bold_file = bold_file inputnode.inputs.boldref = run_data["boldref"] - inputnode.inputs.fmriprep_confounds_file = run_data["confounds"] - inputnode.inputs.fmriprep_confounds_json = run_data["confounds_json"] + inputnode.inputs.full_confounds = run_data["confounds"] + inputnode.inputs.full_confounds_json = run_data["confounds_json"] inputnode.inputs.dummy_scans = dummy_scans inputnode.inputs.atlases = atlases @@ -188,8 +188,8 @@ def init_postprocess_cifti_wf( fields=[ "name_source", "preprocessed_bold", - "fmriprep_confounds_file", - "filtered_motion", + "full_confounds", + "modified_full_confounds", "temporal_mask", "denoised_interpolated_bold", "censored_denoised_bold", @@ -231,14 +231,14 @@ def init_postprocess_cifti_wf( workflow.connect([ (inputnode, prepare_confounds_wf, [ ("bold_file", "inputnode.name_source"), - ("fmriprep_confounds_file", "inputnode.fmriprep_confounds_file"), - ("fmriprep_confounds_json", "inputnode.fmriprep_confounds_json"), + ("full_confounds", "inputnode.full_confounds"), + ("full_confounds_json", "inputnode.full_confounds_json"), ]), (downcast_data, prepare_confounds_wf, [ ("bold_file", "inputnode.preprocessed_bold"), ]), (prepare_confounds_wf, outputnode, [ - ("outputnode.fmriprep_confounds_file", "fmriprep_confounds_file"), + ("outputnode.full_confounds", "full_confounds"), ("outputnode.preprocessed_bold", "preprocessed_bold"), ]), ]) # fmt:skip @@ -248,7 +248,7 @@ def init_postprocess_cifti_wf( workflow.connect([ (prepare_confounds_wf, denoise_bold_wf, [ ("outputnode.temporal_mask", 
"inputnode.temporal_mask"), - ("outputnode.confounds_file", "inputnode.confounds_file"), + ("outputnode.design_matrix", "inputnode.design_matrix"), ]), ]) # fmt:skip @@ -302,9 +302,9 @@ def init_postprocess_cifti_wf( (prepare_confounds_wf, qc_report_wf, [ ("outputnode.preprocessed_bold", "inputnode.preprocessed_bold"), ("outputnode.dummy_scans", "inputnode.dummy_scans"), - ("outputnode.fmriprep_confounds_file", "inputnode.fmriprep_confounds_file"), + ("outputnode.full_confounds", "inputnode.full_confounds"), ("outputnode.temporal_mask", "inputnode.temporal_mask"), - ("outputnode.filtered_motion", "inputnode.filtered_motion"), + ("outputnode.modified_full_confounds", "inputnode.modified_full_confounds"), ]), (denoise_bold_wf, qc_report_wf, [ ("outputnode.denoised_interpolated_bold", "inputnode.denoised_interpolated_bold"), @@ -321,7 +321,7 @@ def init_postprocess_cifti_wf( workflow.connect([ (inputnode, postproc_derivatives_wf, [ - ("fmriprep_confounds_file", "inputnode.fmriprep_confounds_file"), + ("full_confounds", "inputnode.full_confounds"), ("atlas_files", "inputnode.atlas_files"), ]), (denoise_bold_wf, postproc_derivatives_wf, [ @@ -331,16 +331,19 @@ def init_postprocess_cifti_wf( ]), (qc_report_wf, postproc_derivatives_wf, [("outputnode.qc_file", "inputnode.qc_file")]), (prepare_confounds_wf, postproc_derivatives_wf, [ - ("outputnode.confounds_file", "inputnode.confounds_file"), - ("outputnode.confounds_metadata", "inputnode.confounds_metadata"), - ("outputnode.filtered_motion", "inputnode.filtered_motion"), - ("outputnode.motion_metadata", "inputnode.motion_metadata"), + ("outputnode.design_matrix", "inputnode.design_matrix"), + ("outputnode.design_matrix_metadata", "inputnode.design_matrix_metadata"), + ("outputnode.modified_full_confounds", "inputnode.modified_full_confounds"), + ( + "outputnode.modified_full_confounds_metadata", + "inputnode.modified_full_confounds_metadata", + ), ("outputnode.temporal_mask", "inputnode.temporal_mask"), ("outputnode.temporal_mask_metadata", "inputnode.temporal_mask_metadata"), ]), (reho_wf, postproc_derivatives_wf, [("outputnode.reho", "inputnode.reho")]), (postproc_derivatives_wf, outputnode, [ - ("outputnode.filtered_motion", "filtered_motion"), + ("outputnode.modified_full_confounds", "modified_full_confounds"), ("outputnode.temporal_mask", "temporal_mask"), ("outputnode.denoised_interpolated_bold", "denoised_interpolated_bold"), ("outputnode.censored_denoised_bold", "censored_denoised_bold"), diff --git a/xcp_d/workflows/concatenation.py b/xcp_d/workflows/concatenation.py index b07072877..6d663751a 100644 --- a/xcp_d/workflows/concatenation.py +++ b/xcp_d/workflows/concatenation.py @@ -52,7 +52,7 @@ def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): These are used as the bases for concatenated output filenames. preprocessed_bold : :obj:`list` of :obj:`str` The preprocessed BOLD files, after dummy volume removal. - %(filtered_motion)s + %(modified_full_confounds)s One list entry for each run. %(temporal_mask)s One list entry for each run. 
@@ -74,13 +74,13 @@ def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): workflow = Workflow(name=name) output_dir = config.execution.xcp_d_dir - motion_filter_type = config.workflow.motion_filter_type smoothing = config.workflow.smoothing cifti = config.workflow.cifti dcan_qc = config.workflow.dcan_qc - fd_thresh = config.workflow.fd_thresh atlases = config.execution.atlases + censor = any(t > 0 for t in config.workflow.fd_thresh + config.workflow.dvars_thresh) + workflow.__desc__ = """ Postprocessing derivatives from multi-run tasks were then concatenated across runs and directions. """ @@ -90,8 +90,8 @@ def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): fields=[ "name_source", "preprocessed_bold", - "fmriprep_confounds_file", - "filtered_motion", + "full_confounds", + "modified_full_confounds", "temporal_mask", "denoised_interpolated_bold", "censored_denoised_bold", @@ -121,8 +121,8 @@ def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): workflow.connect([ (inputnode, filter_runs, [ ("preprocessed_bold", "preprocessed_bold"), - ("fmriprep_confounds_file", "fmriprep_confounds_file"), - ("filtered_motion", "filtered_motion"), + ("full_confounds", "full_confounds"), + ("modified_full_confounds", "modified_full_confounds"), ("temporal_mask", "temporal_mask"), ("denoised_interpolated_bold", "denoised_interpolated_bold"), ("censored_denoised_bold", "censored_denoised_bold"), @@ -142,8 +142,8 @@ def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): workflow.connect([ (filter_runs, concatenate_inputs, [ ("preprocessed_bold", "preprocessed_bold"), - ("fmriprep_confounds_file", "fmriprep_confounds_file"), - ("filtered_motion", "filtered_motion"), + ("full_confounds", "full_confounds"), + ("modified_full_confounds", "modified_full_confounds"), ("temporal_mask", "temporal_mask"), ("denoised_interpolated_bold", "denoised_interpolated_bold"), ("censored_denoised_bold", "censored_denoised_bold"), @@ -176,8 +176,8 @@ def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): ("preprocessed_bold", "inputnode.preprocessed_bold"), ("denoised_interpolated_bold", "inputnode.denoised_interpolated_bold"), ("censored_denoised_bold", "inputnode.censored_denoised_bold"), - ("fmriprep_confounds_file", "inputnode.fmriprep_confounds_file"), - ("filtered_motion", "inputnode.filtered_motion"), + ("full_confounds", "inputnode.full_confounds"), + ("modified_full_confounds", "inputnode.modified_full_confounds"), ("temporal_mask", "inputnode.temporal_mask"), ("run_index", "inputnode.run_index"), ]), @@ -187,8 +187,8 @@ def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): DerivativesDataSink( base_directory=output_dir, dismiss_entities=["segmentation", "den", "res", "space", "cohort", "desc"], - desc="filtered" if motion_filter_type else None, - suffix="motion", + desc="confounds", + suffix="timeseries", extension=".tsv", ), name="ds_filtered_motion", @@ -197,13 +197,13 @@ def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): ) workflow.connect([ (clean_name_source, ds_filtered_motion, [("name_source", "source_file")]), - (concatenate_inputs, ds_filtered_motion, [("filtered_motion", "in_file")]), + (concatenate_inputs, ds_filtered_motion, [("modified_full_confounds", "in_file")]), (filter_runs, ds_filtered_motion, [ - (("filtered_motion", _make_xcpd_uri, output_dir), "Sources"), + (("modified_full_confounds", _make_xcpd_uri, output_dir), "Sources"), ]), ]) # fmt:skip - if 
fd_thresh > 0: + if censor: ds_temporal_mask = pe.Node( DerivativesDataSink( base_directory=output_dir, @@ -252,7 +252,7 @@ def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): mem_gb=2, ) - if dcan_qc and (fd_thresh > 0): + if dcan_qc and censor: ds_interpolated_filtered_bold = pe.Node( DerivativesDataSink( base_directory=output_dir, @@ -292,7 +292,7 @@ def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): mem_gb=2, ) - if dcan_qc and (fd_thresh > 0): + if dcan_qc and censor: ds_interpolated_filtered_bold = pe.Node( DerivativesDataSink( base_directory=output_dir, @@ -324,7 +324,7 @@ def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): ]), ]) # fmt:skip - if dcan_qc and (fd_thresh > 0): + if dcan_qc and censor: workflow.connect([ (clean_name_source, ds_interpolated_filtered_bold, [("name_source", "source_file")]), (concatenate_inputs, ds_interpolated_filtered_bold, [ diff --git a/xcp_d/workflows/outputs.py b/xcp_d/workflows/outputs.py index 99285cbbf..01fc78204 100644 --- a/xcp_d/workflows/outputs.py +++ b/xcp_d/workflows/outputs.py @@ -187,10 +187,10 @@ def init_postproc_derivatives_wf( smoothed alff reho parcellated_reho - confounds_file - confounds_metadata - %(filtered_motion)s - motion_metadata + design_matrix + design_matrix_metadata + %(modified_full_confounds)s + modified_full_confounds_metadata %(temporal_mask)s temporal_mask_metadata %(dummy_scans)s @@ -202,8 +202,6 @@ def init_postproc_derivatives_wf( low_pass = config.workflow.low_pass high_pass = config.workflow.high_pass bpf_order = config.workflow.bpf_order - fd_thresh = config.workflow.fd_thresh - motion_filter_type = config.workflow.motion_filter_type smoothing = config.workflow.smoothing params = config.workflow.params atlases = config.execution.atlases @@ -211,15 +209,17 @@ def init_postproc_derivatives_wf( dcan_qc = config.workflow.dcan_qc output_dir = config.execution.xcp_d_dir + censor = any(t > 0 for t in config.workflow.fd_thresh + config.workflow.dvars_thresh) + inputnode = pe.Node( niu.IdentityInterface( fields=[ # preprocessing files to use as sources - "fmriprep_confounds_file", + "full_confounds", # postprocessed outputs "atlas_files", # for Sources - "confounds_file", - "confounds_metadata", + "design_matrix", + "design_matrix_metadata", "coverage", "timeseries", "correlations", @@ -233,8 +233,8 @@ def init_postproc_derivatives_wf( "smoothed_alff", "reho", "parcellated_reho", - "filtered_motion", - "motion_metadata", + "modified_full_confounds", + "modified_full_confounds_metadata", "temporal_mask", "temporal_mask_metadata", "dummy_scans", @@ -253,7 +253,7 @@ def init_postproc_derivatives_wf( outputnode = pe.Node( niu.IdentityInterface( fields=[ - "filtered_motion", + "modified_full_confounds", "temporal_mask", "denoised_interpolated_bold", "censored_denoised_bold", @@ -295,39 +295,39 @@ def init_postproc_derivatives_wf( preproc_bold_src = _make_preproc_uri(name_source, fmri_dir) - ds_filtered_motion = pe.Node( + ds_modified_full_confounds = pe.Node( DerivativesDataSink( base_directory=output_dir, source_file=name_source, dismiss_entities=["segmentation", "den", "res", "space", "cohort", "desc"], - desc="filtered" if motion_filter_type else None, - suffix="motion", + desc="confounds", + suffix="timeseries", extension=".tsv", ), - name="ds_filtered_motion", + name="ds_modified_full_confounds", run_without_submitting=True, mem_gb=1, ) # fmt:off workflow.connect([ - (inputnode, ds_filtered_motion, [ - ("motion_metadata", "meta_dict"), - 
("filtered_motion", "in_file"), - (("fmriprep_confounds_file", _make_preproc_uri, fmri_dir), "Sources"), + (inputnode, ds_modified_full_confounds, [ + ("modified_full_confounds_metadata", "meta_dict"), + ("modified_full_confounds", "in_file"), + (("full_confounds", _make_preproc_uri, fmri_dir), "Sources"), ]), - (ds_filtered_motion, outputnode, [("out_file", "filtered_motion")]), + (ds_modified_full_confounds, outputnode, [("out_file", "modified_full_confounds")]), ]) # fmt:on merge_dense_src = pe.Node( - niu.Merge(numinputs=(1 + (1 if fd_thresh > 0 else 0) + (1 if params != "none" else 0))), + niu.Merge(numinputs=(1 + (1 if censor else 0) + (1 if params != "none" else 0))), name="merge_dense_src", run_without_submitting=True, mem_gb=1, ) merge_dense_src.inputs.in1 = preproc_bold_src - if fd_thresh > 0: + if censor: ds_temporal_mask = pe.Node( DerivativesDataSink( base_directory=output_dir, @@ -336,7 +336,8 @@ def init_postproc_derivatives_wf( extension=".tsv", source_file=name_source, # Metadata - Threshold=fd_thresh, + FramewiseDisplacementThreshold=config.workflow.fd_thresh, + DVARSThreshold=config.workflow.dvars_thresh, ), name="ds_temporal_mask", run_without_submitting=True, @@ -348,7 +349,7 @@ def init_postproc_derivatives_wf( ("temporal_mask_metadata", "meta_dict"), ("temporal_mask", "in_file"), ]), - (ds_filtered_motion, ds_temporal_mask, [ + (ds_modified_full_confounds, ds_temporal_mask, [ (("out_file", _make_xcpd_uri, output_dir), "Sources"), ]), (ds_temporal_mask, outputnode, [("out_file", "temporal_mask")]), @@ -360,15 +361,13 @@ def init_postproc_derivatives_wf( if params != "none": confounds_src = pe.Node( - niu.Merge( - numinputs=(1 + (1 if fd_thresh > 0 else 0) + (1 if custom_confounds_file else 0)) - ), + niu.Merge(numinputs=(1 + (1 if censor else 0) + (1 if custom_confounds_file else 0))), name="confounds_src", run_without_submitting=True, mem_gb=1, ) - workflow.connect([(inputnode, confounds_src, [("fmriprep_confounds_file", "in1")])]) - if fd_thresh > 0: + workflow.connect([(inputnode, confounds_src, [("full_confounds", "in1")])]) + if censor: # fmt:off workflow.connect([ (ds_temporal_mask, confounds_src, [ @@ -383,7 +382,7 @@ def init_postproc_derivatives_wf( elif custom_confounds_file: confounds_src.inputs.in2 = _make_custom_uri(custom_confounds_file) - ds_confounds = pe.Node( + ds_design_matrix = pe.Node( DerivativesDataSink( base_directory=output_dir, source_file=name_source, @@ -392,18 +391,18 @@ def init_postproc_derivatives_wf( suffix="design", extension=".tsv", ), - name="ds_confounds", + name="ds_design_matrix", run_without_submitting=False, ) # fmt:off workflow.connect([ - (inputnode, ds_confounds, [ - ("confounds_file", "in_file"), - ("confounds_metadata", "meta_dict"), + (inputnode, ds_design_matrix, [ + ("design_matrix", "in_file"), + ("design_matrix_metadata", "meta_dict"), ]), - (confounds_src, ds_confounds, [("out", "Sources")]), - (ds_confounds, merge_dense_src, [ - (("out_file", _make_xcpd_uri, output_dir), f"in{3 if fd_thresh > 0 else 2}"), + (confounds_src, ds_design_matrix, [("out", "Sources")]), + (ds_design_matrix, merge_dense_src, [ + (("out_file", _make_xcpd_uri, output_dir), f"in{3 if censor else 2}"), ]), ]) # fmt:on @@ -434,7 +433,7 @@ def init_postproc_derivatives_wf( ]) # fmt:on - if dcan_qc and (fd_thresh > 0): + if dcan_qc and censor: ds_interpolated_denoised_bold = pe.Node( DerivativesDataSink( base_directory=output_dir, diff --git a/xcp_d/workflows/plotting.py b/xcp_d/workflows/plotting.py index d8c31c43b..cecb2d97a 100644 --- 
a/xcp_d/workflows/plotting.py +++ b/xcp_d/workflows/plotting.py @@ -67,9 +67,9 @@ def init_qc_report_wf( %(template_to_anat_xfm)s Only used with non-CIFTI data. %(dummy_scans)s - %(fmriprep_confounds_file)s + full_confounds %(temporal_mask)s - %(filtered_motion)s + modified_full_confounds Outputs ------- @@ -91,8 +91,8 @@ def init_qc_report_wf( "denoised_interpolated_bold", "censored_denoised_bold", "dummy_scans", - "fmriprep_confounds_file", - "filtered_motion", + "full_confounds", + "modified_full_confounds", "temporal_mask", "run_index", # will only be set for concatenated data # nifti-only inputs @@ -280,7 +280,7 @@ def init_qc_report_wf( ("name_source", "name_source"), ("preprocessed_bold", "bold_file"), ("censored_denoised_bold", "cleaned_file"), - ("fmriprep_confounds_file", "fmriprep_confounds_file"), + ("full_confounds", "full_confounds"), ("temporal_mask", "temporal_mask"), ("dummy_scans", "dummy_scans"), ]), @@ -311,7 +311,7 @@ def init_qc_report_wf( if dcan_qc: make_dcan_qc_file_node = pe.Node( Function( - input_names=["filtered_motion", "TR"], + input_names=["modified_full_confounds", "TR"], output_names=["dcan_df_file"], function=make_dcan_qc_file, ), @@ -321,7 +321,9 @@ def init_qc_report_wf( # fmt:off workflow.connect([ - (inputnode, make_dcan_qc_file_node, [("filtered_motion", "filtered_motion")]), + (inputnode, make_dcan_qc_file_node, [ + ("modified_full_confounds", "modified_full_confounds"), + ]), ]) # fmt:on @@ -357,7 +359,7 @@ def init_qc_report_wf( (inputnode, plot_execsummary_carpets_dcan, [ ("preprocessed_bold", "preprocessed_bold"), ("denoised_interpolated_bold", "denoised_interpolated_bold"), - ("filtered_motion", "filtered_motion"), + ("modified_full_confounds", "modified_full_confounds"), ("temporal_mask", "temporal_mask"), ("run_index", "run_index"), ]), diff --git a/xcp_d/workflows/postprocessing.py b/xcp_d/workflows/postprocessing.py index 979e0aabf..9e423b133 100644 --- a/xcp_d/workflows/postprocessing.py +++ b/xcp_d/workflows/postprocessing.py @@ -12,7 +12,9 @@ from xcp_d.interfaces.bids import DerivativesDataSink from xcp_d.interfaces.censoring import ( Censor, - GenerateConfounds, + GenerateDesignMatrix, + GenerateTemporalMask, + ModifyConfounds, RandomCensor, RemoveDummyVolumes, ) @@ -75,8 +77,8 @@ def init_prepare_confounds_wf( ------ %(name_source)s preprocessed_bold : :obj:`str` - %(fmriprep_confounds_file)s - fmriprep_confounds_json : :obj:`str` + full_confounds + full_confounds_json : :obj:`str` JSON file associated with the fMRIPrep confounds TSV. %(custom_confounds_file)s %(dummy_scans)s @@ -85,14 +87,16 @@ def init_prepare_confounds_wf( Outputs ------- preprocessed_bold : :obj:`str` - %(fmriprep_confounds_file)s - confounds_file : :obj:`str` + full_confounds + design_matrix : :obj:`str` The selected confounds, potentially including custom confounds, after dummy scan removal. - confounds_metadata : :obj:`dict` + design_matrix_metadata : :obj:`dict` %(dummy_scans)s If originally set to "auto", this output will have the actual number of dummy volumes. - %(filtered_motion)s - motion_metadata : :obj:`dict` + modified_full_confounds + ``full_confounds`` file after filtering motion parameters and recalculating framewise + displacement. 
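The postprocessing diff below splits the old GenerateConfounds interface into ModifyConfounds, GenerateTemporalMask, and GenerateDesignMatrix. A hypothetical numpy sketch of the mask expansion implied by the new censor_before/censor_after/censor_between parameters (a standalone helper, not the interface's actual implementation):

    import numpy as np

    def expand_outliers(outliers, n_before=0, n_after=0, n_between=0):
        """Expand a boolean outlier mask (True = volume is censored)."""
        outliers = np.asarray(outliers, dtype=bool).copy()
        for i in np.where(outliers)[0]:
            outliers[max(0, i - n_before):i] = True   # volumes before each outlier
            outliers[i + 1:i + 1 + n_after] = True    # volumes after each outlier
        # Also censor short runs of retained volumes sandwiched between outliers.
        if n_between:
            idx = np.where(outliers)[0]
            for start, end in zip(idx[:-1], idx[1:]):
                if 1 < end - start <= n_between + 1:
                    outliers[start + 1:end] = True
        return outliers

    expand_outliers([0, 0, 1, 0, 1, 0, 0], n_before=1, n_after=1)
    # -> [False, True, True, True, True, True, False]
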
+ modified_full_confounds_metadata : :obj:`dict` %(temporal_mask)s temporal_mask_metadata : :obj:`dict` """ @@ -106,7 +110,6 @@ def init_prepare_confounds_wf( band_stop_min = config.workflow.band_stop_min band_stop_max = config.workflow.band_stop_max motion_filter_order = config.workflow.motion_filter_order - fd_thresh = config.workflow.fd_thresh omp_nthreads = config.nipype.omp_nthreads dummy_scans_str = "" @@ -135,10 +138,15 @@ def init_prepare_confounds_wf( TR=TR, ) - if (fd_thresh > 0) or exact_scans: + censor = any(t > 0 for t in config.workflow.fd_thresh + config.workflow.dvars_thresh) + if censor or exact_scans: censoring_description = describe_censoring( motion_filter_type=motion_filter_type, - fd_thresh=fd_thresh, + fd_thresh=config.workflow.fd_thresh, + dvars_thresh=config.workflow.dvars_thresh, + censor_before=config.workflow.censor_before, + censor_after=config.workflow.censor_after, + censor_between=config.workflow.censor_between, exact_scans=exact_scans, ) else: @@ -159,8 +167,8 @@ def init_prepare_confounds_wf( fields=[ "name_source", "preprocessed_bold", - "fmriprep_confounds_file", - "fmriprep_confounds_json", + "full_confounds", + "full_confounds_json", "custom_confounds_file", "dummy_scans", ], @@ -174,50 +182,79 @@ def init_prepare_confounds_wf( niu.IdentityInterface( fields=[ "preprocessed_bold", - "fmriprep_confounds_file", # used to calculate motion in concatenation workflow - "confounds_file", - "confounds_metadata", - "dummy_scans", - "filtered_motion", - "motion_metadata", + "full_confounds", # original confounds after dummy scan removal + "modified_full_confounds", # modified confounds after motion filtering + "modified_full_confounds_metadata", "temporal_mask", "temporal_mask_metadata", + "design_matrix", + "design_matrix_metadata", + "dummy_scans", ], ), name="outputnode", ) - generate_confounds = pe.Node( - GenerateConfounds( - params=params, + filter_motion = pe.Node( + ModifyConfounds( TR=TR, - band_stop_min=band_stop_min, - band_stop_max=band_stop_max, motion_filter_type=motion_filter_type, motion_filter_order=motion_filter_order, - fd_thresh=fd_thresh, + band_stop_min=band_stop_min, + band_stop_max=band_stop_max, head_radius=head_radius, ), - name="generate_confounds", - mem_gb=2, + name="filter_motion", + mem_gb=0.1, + omp_nthreads=omp_nthreads, + ) + workflow.connect([ + (inputnode, filter_motion, [ + ("full_confounds", "full_confounds"), + ("full_confounds_json", "full_confounds_json"), + ]), + (filter_motion, outputnode, [ + ("modified_full_confounds_metadata", "modified_full_confounds_metadata"), + ]), + ]) # fmt:skip + + generate_temporal_mask = pe.Node( + GenerateTemporalMask( + fd_thresh=config.workflow.fd_thresh, + dvars_thresh=config.workflow.dvars_thresh, + censor_before=config.workflow.censor_before, + censor_after=config.workflow.censor_after, + censor_between=config.workflow.censor_between, + ), + name="generate_temporal_mask", + mem_gb=0.1, + omp_nthreads=omp_nthreads, + ) + workflow.connect([ + (filter_motion, generate_temporal_mask, [("modified_full_confounds", "full_confounds")]), + ]) # fmt:skip + + generate_design_matrix = pe.Node( + GenerateDesignMatrix(params=params), + name="generate_design_matrix", + mem_gb=0.1, omp_nthreads=omp_nthreads, ) # Load and filter confounds - # fmt:off workflow.connect([ - (inputnode, generate_confounds, [ + (inputnode, generate_design_matrix, [ ("name_source", "in_file"), - ("fmriprep_confounds_file", "fmriprep_confounds_file"), - ("fmriprep_confounds_json", "fmriprep_confounds_json"), 
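ModifyConfounds (the filter_motion node defined below) band-stop filters the motion parameters and recalculates framewise displacement from the filtered values. A rough standalone sketch of the idea, assuming frequencies already converted to Hz (the real interface takes band-stop limits in breaths-per-minute and also supports a low-pass variant):

    import pandas as pd
    from scipy.signal import filtfilt, iirnotch

    def filter_motion_and_recompute_fd(confounds_tsv, TR, f_low, f_high, head_radius=50):
        df = pd.read_table(confounds_tsv)
        motion = ["trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z"]
        # Notch filter centered between the band-stop limits.
        center = (f_low + f_high) / 2
        quality = center / (f_high - f_low)
        b, a = iirnotch(center, quality, fs=1 / TR)
        df[motion] = filtfilt(b, a, df[motion].values, axis=0)
        # Recompute FD (Power et al.): sum of absolute backward differences,
        # with rotations (radians) converted to arc length on a sphere.
        deltas = df[motion].diff().fillna(0).abs()
        deltas[["rot_x", "rot_y", "rot_z"]] *= head_radius
        df["framewise_displacement"] = deltas.sum(axis=1)
        return df
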
("custom_confounds_file", "custom_confounds_file"), ]), - (generate_confounds, outputnode, [ - ("motion_metadata", "motion_metadata"), - ("confounds_metadata", "confounds_metadata"), + (filter_motion, generate_design_matrix, [ + ("modified_full_confounds", "full_confounds"), + ("modified_full_confounds_metadata", "full_confounds_metadata"), ]), - ]) - # fmt:on + (generate_design_matrix, outputnode, [ + ("design_matrix_metadata", "design_matrix_metadata"), + ]), + ]) # fmt:skip # A buffer node to hold either the original files or the files with the first N vols removed. dummy_scan_buffer = pe.Node( @@ -225,11 +262,11 @@ def init_prepare_confounds_wf( fields=[ "preprocessed_bold", "dummy_scans", - "fmriprep_confounds_file", - "confounds_file", - "motion_file", + "full_confounds", + "modified_full_confounds", "temporal_mask", - ] + "design_matrix", + ], ), name="dummy_scan_buffer", ) @@ -241,78 +278,85 @@ def init_prepare_confounds_wf( mem_gb=4, ) - # fmt:off workflow.connect([ (inputnode, remove_dummy_scans, [ ("preprocessed_bold", "bold_file"), ("dummy_scans", "dummy_scans"), + ("full_confounds", "full_confounds"), ]), - (generate_confounds, remove_dummy_scans, [ - ("confounds_file", "confounds_file"), - ("motion_file", "motion_file"), - ("temporal_mask", "temporal_mask"), - # fMRIPrep confounds file is needed for filtered motion. - # The selected confounds are not guaranteed to include motion params. - ("filtered_confounds_file", "fmriprep_confounds_file"), + (filter_motion, remove_dummy_scans, [ + ("modified_full_confounds", "modified_full_confounds"), ]), + (generate_temporal_mask, remove_dummy_scans, [("temporal_mask", "temporal_mask")]), + (generate_design_matrix, remove_dummy_scans, [("design_matrix", "design_matrix")]), (remove_dummy_scans, dummy_scan_buffer, [ + ("dummy_scans", "dummy_scans"), ("bold_file_dropped_TR", "preprocessed_bold"), - ("fmriprep_confounds_file_dropped_TR", "fmriprep_confounds_file"), - ("confounds_file_dropped_TR", "confounds_file"), - ("motion_file_dropped_TR", "motion_file"), + ("full_confounds_dropped_TR", "full_confounds"), + ("modified_full_confounds_dropped_TR", "modified_full_confounds"), + ("design_matrix_dropped_TR", "design_matrix"), ("temporal_mask_dropped_TR", "temporal_mask"), - ("dummy_scans", "dummy_scans"), ]), - ]) - # fmt:on + ]) # fmt:skip else: - # fmt:off workflow.connect([ (inputnode, dummy_scan_buffer, [ ("dummy_scans", "dummy_scans"), ("preprocessed_bold", "preprocessed_bold"), + ("full_confounds", "full_confounds"), ]), - (generate_confounds, dummy_scan_buffer, [ - ("confounds_file", "confounds_file"), - ("motion_file", "motion_file"), - ("temporal_mask", "temporal_mask"), - # fMRIPrep confounds file is needed for filtered motion. - # The selected confounds are not guaranteed to include motion params. 
- ("filtered_confounds_file", "fmriprep_confounds_file"), + (filter_motion, dummy_scan_buffer, [ + ("modified_full_confounds", "modified_full_confounds"), ]), - ]) - # fmt:on + (generate_temporal_mask, dummy_scan_buffer, [("temporal_mask", "temporal_mask")]), + (generate_design_matrix, dummy_scan_buffer, [("design_matrix", "design_matrix")]), + ]) # fmt:skip - # fmt:off workflow.connect([ (dummy_scan_buffer, outputnode, [ - ("preprocessed_bold", "preprocessed_bold"), - ("fmriprep_confounds_file", "fmriprep_confounds_file"), - ("confounds_file", "confounds_file"), - ("motion_file", "filtered_motion"), ("dummy_scans", "dummy_scans"), + ("preprocessed_bold", "preprocessed_bold"), + ("full_confounds", "full_confounds"), + ("modified_full_confounds", "modified_full_confounds"), + ("design_matrix", "design_matrix"), ]), - ]) - # fmt:on + ]) # fmt:skip - random_censor = pe.Node( - RandomCensor(exact_scans=exact_scans, random_seed=random_seed), - name="random_censor", + tmask_buffer = pe.Node( + niu.IdentityInterface(fields=["temporal_mask", "temporal_mask_metadata"]), + name="tmask_buffer", ) + if exact_scans: + random_censor = pe.Node( + RandomCensor(exact_scans=exact_scans, random_seed=random_seed), + name="random_censor", + ) + + workflow.connect([ + (generate_temporal_mask, random_censor, [ + ("temporal_mask_metadata", "temporal_mask_metadata"), + ]), + (dummy_scan_buffer, random_censor, [("temporal_mask", "temporal_mask")]), + (random_censor, tmask_buffer, [ + ("temporal_mask", "temporal_mask"), + ("temporal_mask_metadata", "temporal_mask_metadata"), + ]), + ]) # fmt:skip + else: + workflow.connect([ + (generate_temporal_mask, tmask_buffer, [ + ("temporal_mask_metadata", "temporal_mask_metadata"), + ]), + (dummy_scan_buffer, tmask_buffer, [("temporal_mask", "temporal_mask")]), + ]) # fmt:skip - # fmt:off workflow.connect([ - (generate_confounds, random_censor, [ - ("temporal_mask_metadata", "temporal_mask_metadata"), - ]), - (dummy_scan_buffer, random_censor, [("temporal_mask", "temporal_mask")]), - (random_censor, outputnode, [ + (tmask_buffer, outputnode, [ ("temporal_mask", "temporal_mask"), ("temporal_mask_metadata", "temporal_mask_metadata"), ]), - ]) - # fmt:on + ]) # fmt:skip if params != "none": plot_design_matrix = pe.Node( @@ -324,12 +368,10 @@ def init_prepare_confounds_wf( name="plot_design_matrix", ) - # fmt:off workflow.connect([ - (dummy_scan_buffer, plot_design_matrix, [("confounds_file", "design_matrix")]), - (random_censor, plot_design_matrix, [("temporal_mask", "temporal_mask")]), - ]) - # fmt:on + (dummy_scan_buffer, plot_design_matrix, [("design_matrix", "design_matrix")]), + (tmask_buffer, plot_design_matrix, [("temporal_mask", "temporal_mask")]), + ]) # fmt:skip ds_design_matrix_plot = pe.Node( DerivativesDataSink( @@ -343,18 +385,20 @@ def init_prepare_confounds_wf( run_without_submitting=False, ) - # fmt:off workflow.connect([ (inputnode, ds_design_matrix_plot, [("name_source", "source_file")]), (plot_design_matrix, ds_design_matrix_plot, [("design_matrix_figure", "in_file")]), - ]) - # fmt:on + ]) # fmt:skip censor_report = pe.Node( CensoringPlot( TR=TR, motion_filter_type=motion_filter_type, - fd_thresh=fd_thresh, + fd_thresh=config.workflow.fd_thresh, + dvars_thresh=config.workflow.dvars_thresh, + censor_before=config.workflow.censor_before, + censor_after=config.workflow.censor_after, + censor_between=config.workflow.censor_between, head_radius=head_radius, ), name="censor_report", @@ -362,15 +406,12 @@ def init_prepare_confounds_wf( n_procs=omp_nthreads, ) - # 
fmt:off workflow.connect([ - (generate_confounds, censor_report, [("motion_file", "filtered_motion")]), + (inputnode, censor_report, [("full_confounds", "full_confounds")]), # use pre-dummy scans + (filter_motion, censor_report, [("modified_full_confounds", "modified_full_confounds")]), (dummy_scan_buffer, censor_report, [("dummy_scans", "dummy_scans")]), - (random_censor, censor_report, [("temporal_mask", "temporal_mask")]), - # use the undropped version - (inputnode, censor_report, [("fmriprep_confounds_file", "fmriprep_confounds_file")]), - ]) - # fmt:on + (tmask_buffer, censor_report, [("temporal_mask", "temporal_mask")]), + ]) # fmt:skip ds_report_censoring = pe.Node( DerivativesDataSink( @@ -384,12 +425,10 @@ def init_prepare_confounds_wf( run_without_submitting=False, ) - # fmt:off workflow.connect([ (inputnode, ds_report_censoring, [("name_source", "source_file")]), (censor_report, ds_report_censoring, [("out_file", "in_file")]), - ]) - # fmt:on + ]) # fmt:skip return workflow @@ -534,7 +573,7 @@ def init_denoise_bold_wf(TR, mem_gb, name="denoise_bold_wf"): preprocessed_bold %(temporal_mask)s mask - confounds_file + design_matrix Outputs ------- @@ -545,6 +584,7 @@ def init_denoise_bold_wf(TR, mem_gb, name="denoise_bold_wf"): workflow = Workflow(name=name) fd_thresh = config.workflow.fd_thresh + dvars_thresh = config.workflow.dvars_thresh low_pass = config.workflow.low_pass high_pass = config.workflow.high_pass bpf_order = config.workflow.bpf_order @@ -558,7 +598,7 @@ def init_denoise_bold_wf(TR, mem_gb, name="denoise_bold_wf"): Nuisance regressors were regressed from the BOLD data using a denoising method based on *Nilearn*'s approach. """ - if fd_thresh > 0: + if fd_thresh[0] > 0 or dvars_thresh[0] > 0: workflow.__desc__ += ( "Any volumes censored earlier in the workflow were first cubic spline interpolated in " "the BOLD data. " @@ -588,7 +628,7 @@ def init_denoise_bold_wf(TR, mem_gb, name="denoise_bold_wf"): "The same filter was applied to the confounds." ) - if fd_thresh > 0: + if fd_thresh[0] > 0 or dvars_thresh[0] > 0: workflow.__desc__ += ( " The resulting time series were then denoised via linear regression, " "in which the low-motion volumes from the BOLD time series and confounds were used to " @@ -607,7 +647,7 @@ def init_denoise_bold_wf(TR, mem_gb, name="denoise_bold_wf"): "preprocessed_bold", "temporal_mask", "mask", # only used for NIFTIs - "confounds_file", + "design_matrix", ], ), name="inputnode", @@ -640,7 +680,7 @@ def init_denoise_bold_wf(TR, mem_gb, name="denoise_bold_wf"): workflow.connect([ (inputnode, regress_and_filter_bold, [ ("preprocessed_bold", "preprocessed_bold"), - ("confounds_file", "confounds_file"), + ("design_matrix", "design_matrix"), ("temporal_mask", "temporal_mask"), ]), (regress_and_filter_bold, outputnode, [ diff --git a/xcp_d/workflows/restingstate.py b/xcp_d/workflows/restingstate.py index 5218109ac..897bec73e 100644 --- a/xcp_d/workflows/restingstate.py +++ b/xcp_d/workflows/restingstate.py @@ -92,13 +92,12 @@ def init_alff_wf( output_dir = config.execution.xcp_d_dir low_pass = config.workflow.low_pass high_pass = config.workflow.high_pass - fd_thresh = config.workflow.fd_thresh smoothing = config.workflow.smoothing cifti = config.workflow.cifti omp_nthreads = config.nipype.omp_nthreads periodogram_desc = "" - if fd_thresh > 0: + if config.workflow.fd_thresh[0] > 0 or config.workflow.dvars_thresh[0] > 0: periodogram_desc = ( " using the Lomb-Scargle periodogram " "[@lomb1976least;@scargle1982studies;@townsend2010fast;@taylorlomb]"
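Because censoring leaves unevenly spaced samples, the ALFF workflow above switches to the Lomb-Scargle periodogram whenever either first-element threshold is positive. A small illustration of why that estimator fits censored data, using scipy (scipy.signal.lombscargle expects angular frequencies):

    import numpy as np
    from scipy.signal import lombscargle

    TR = 2.0
    times = np.arange(200) * TR
    rng = np.random.default_rng(0)
    signal = np.sin(2 * np.pi * 0.05 * times) + 0.1 * rng.standard_normal(200)
    signal -= signal.mean()  # lombscargle assumes a zero-mean series

    keep = np.ones(200, dtype=bool)
    keep[[20, 21, 57, 90, 91, 92]] = False  # censored high-motion volumes

    freqs = np.linspace(0.01, 0.08, 50)  # Hz, within a typical ALFF band
    power = lombscargle(times[keep], signal[keep], 2 * np.pi * freqs)
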