diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index 78fae2d871..ec3384018a 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -608,13 +608,8 @@ def _run_interface(self, runtime): if len(imgseries.shape) != 4: raise ValueError( - "{} expected a 4-D nifti file. Input {} has " - "{} dimensions (shape {})".format( - self._header, - self.inputs.realigned_file, - len(imgseries.shape), - imgseries.shape, - ) + f"{self._header} expected a 4-D nifti file. Input {self.inputs.realigned_file} has " + f"{len(imgseries.shape)} dimensions (shape {imgseries.shape})" ) if len(mask_images) == 0: @@ -648,8 +643,8 @@ def _run_interface(self, runtime): if TR == 0: raise ValueError( - "{} cannot detect repetition time from image - " - "Set the repetition_time input".format(self._header) + f"{self._header} cannot detect repetition time from image - " + "Set the repetition_time input" ) if isdefined(self.inputs.variance_threshold): @@ -753,8 +748,8 @@ def _run_interface(self, runtime): f.write("\t".join(["component"] + list(metadata.keys())) + "\n") for i in zip(components_names, *metadata.values()): f.write( - "{0[0]}\t{0[1]}\t{0[2]:.10f}\t" - "{0[3]:.10f}\t{0[4]:.10f}\t{0[5]}\n".format(i) + f"{i[0]}\t{i[1]}\t{i[2]:.10f}\t" + f"{i[3]:.10f}\t{i[4]:.10f}\t{i[5]}\n" ) return runtime @@ -1398,9 +1393,7 @@ def compute_noise_components( if imgseries.shape[:3] != mask.shape: raise ValueError( "Inputs for CompCor, timeseries and mask, do not have " - "matching spatial dimensions ({} and {}, respectively)".format( - imgseries.shape[:3], mask.shape - ) + f"matching spatial dimensions ({imgseries.shape[:3]} and {mask.shape}, respectively)" ) voxel_timecourses = imgseries[mask, :] diff --git a/nipype/caching/memory.py b/nipype/caching/memory.py index a1d45ffff2..e03aa9494e 100644 --- a/nipype/caching/memory.py +++ b/nipype/caching/memory.py @@ -86,12 +86,7 @@ def __call__(self, **kwargs): return out def __repr__(self): - return 
"{}({}.{}), base_dir={})".format( - self.__class__.__name__, - self.interface.__module__, - self.interface.__name__, - self.base_dir, - ) + return f"{self.__class__.__name__}({self.interface.__module__}.{self.interface.__name__}), base_dir={self.base_dir})" ############################################################################### diff --git a/nipype/interfaces/ants/registration.py b/nipype/interfaces/ants/registration.py index 41037ffc5f..3760466d41 100644 --- a/nipype/interfaces/ants/registration.py +++ b/nipype/interfaces/ants/registration.py @@ -204,11 +204,7 @@ def _transformation_constructor(self): return "".join(retval) def _regularization_constructor(self): - return "--regularization {}[{},{}]".format( - self.inputs.regularization, - self.inputs.regularization_gradient_field_sigma, - self.inputs.regularization_deformation_field_sigma, - ) + return f"--regularization {self.inputs.regularization}[{self.inputs.regularization_gradient_field_sigma},{self.inputs.regularization_deformation_field_sigma}]" def _affine_gradient_descent_option_constructor(self): values = self.inputs.affine_gradient_descent_option @@ -1242,10 +1238,7 @@ def _format_winsorize_image_intensities(self): ) ) self._quantilesDone = True - return "--winsorize-image-intensities [ {}, {} ]".format( - self.inputs.winsorize_lower_quantile, - self.inputs.winsorize_upper_quantile, - ) + return f"--winsorize-image-intensities [ {self.inputs.winsorize_lower_quantile}, {self.inputs.winsorize_upper_quantile} ]" def _get_initial_transform_filenames(self): n_transforms = len(self.inputs.initial_moving_transform) @@ -1269,10 +1262,7 @@ def _get_initial_transform_filenames(self): def _format_arg(self, opt, spec, val): if opt == "fixed_image_mask": if isdefined(self.inputs.moving_image_mask): - return "--masks [ {}, {} ]".format( - self.inputs.fixed_image_mask, - self.inputs.moving_image_mask, - ) + return f"--masks [ {self.inputs.fixed_image_mask}, {self.inputs.moving_image_mask} ]" else: return 
"--masks %s" % self.inputs.fixed_image_mask elif opt == "transforms": @@ -1309,16 +1299,9 @@ def _format_arg(self, opt, spec, val): out_filename = self._get_outputfilenames(inverse=False) inv_out_filename = self._get_outputfilenames(inverse=True) if out_filename and inv_out_filename: - return "--output [ {}, {}, {} ]".format( - self.inputs.output_transform_prefix, - out_filename, - inv_out_filename, - ) + return f"--output [ {self.inputs.output_transform_prefix}, {out_filename}, {inv_out_filename} ]" elif out_filename: - return "--output [ {}, {} ]".format( - self.inputs.output_transform_prefix, - out_filename, - ) + return f"--output [ {self.inputs.output_transform_prefix}, {out_filename} ]" else: return "--output %s" % self.inputs.output_transform_prefix elif opt == "winsorize_upper_quantile" or opt == "winsorize_lower_quantile": @@ -1590,29 +1573,16 @@ class MeasureImageSimilarity(ANTSCommand): def _metric_constructor(self): retval = ( - '--metric {metric}["{fixed_image}","{moving_image}",{metric_weight},' - "{radius_or_number_of_bins},{sampling_strategy},{sampling_percentage}]".format( - metric=self.inputs.metric, - fixed_image=self.inputs.fixed_image, - moving_image=self.inputs.moving_image, - metric_weight=self.inputs.metric_weight, - radius_or_number_of_bins=self.inputs.radius_or_number_of_bins, - sampling_strategy=self.inputs.sampling_strategy, - sampling_percentage=self.inputs.sampling_percentage, - ) + f'--metric {self.inputs.metric}["{self.inputs.fixed_image}","{self.inputs.moving_image}",{self.inputs.metric_weight},' + f"{self.inputs.radius_or_number_of_bins},{self.inputs.sampling_strategy},{self.inputs.sampling_percentage}]" ) return retval def _mask_constructor(self): if self.inputs.moving_image_mask: - retval = '--masks ["{fixed_image_mask}","{moving_image_mask}"]'.format( - fixed_image_mask=self.inputs.fixed_image_mask, - moving_image_mask=self.inputs.moving_image_mask, - ) + retval = f'--masks 
["{self.inputs.fixed_image_mask}","{self.inputs.moving_image_mask}"]' else: - retval = '--masks "{fixed_image_mask}"'.format( - fixed_image_mask=self.inputs.fixed_image_mask - ) + retval = f'--masks "{self.inputs.fixed_image_mask}"' return retval def _format_arg(self, opt, spec, val): @@ -1871,9 +1841,7 @@ def _list_outputs(self): f"00_{self.inputs.output_prefix}_AffineTransform.mat" ) outputs["displacement_field"] = os.path.abspath( - "01_{}_DisplacementFieldTransform.nii.gz".format( - self.inputs.output_prefix - ) + f"01_{self.inputs.output_prefix}_DisplacementFieldTransform.nii.gz" ) if self.inputs.process == "assemble": outputs["out_file"] = os.path.abspath(self.inputs.out_file) diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index 35764d5397..57c7d78bde 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -849,8 +849,8 @@ def _filename_from_source(self, name, chain=None): if not isinstance(ns, (str, bytes)): raise ValueError( - "name_source of '{}' trait should be an input trait " - "name, but a type {} object was found".format(name, type(ns)) + f"name_source of '{name}' trait should be an input trait " + f"name, but a type {type(ns)} object was found" ) if isdefined(getattr(self.inputs, ns)): diff --git a/nipype/interfaces/base/specs.py b/nipype/interfaces/base/specs.py index a7f61e6889..6291db46f4 100644 --- a/nipype/interfaces/base/specs.py +++ b/nipype/interfaces/base/specs.py @@ -141,10 +141,7 @@ def _deprecated_warn(self, obj, name, old, new): raise TraitError(msg) else: if trait_spec.new_name: - msg += "Unsetting old value {}; setting new value {}.".format( - name, - trait_spec.new_name, - ) + msg += f"Unsetting old value {name}; setting new value {trait_spec.new_name}." 
warn(msg) if trait_spec.new_name: self.trait_set( diff --git a/nipype/interfaces/base/tests/test_resource_monitor.py b/nipype/interfaces/base/tests/test_resource_monitor.py index 8ecd5269e9..5e2365bd39 100644 --- a/nipype/interfaces/base/tests/test_resource_monitor.py +++ b/nipype/interfaces/base/tests/test_resource_monitor.py @@ -55,7 +55,9 @@ class UseResources(CommandLine): @pytest.mark.skip(reason="inconsistent readings") @pytest.mark.skipif(os.getenv("CI_SKIP_TEST", False), reason="disabled in CI tests") -@pytest.mark.parametrize(("mem_gb", "n_procs"), [(0.5, 3), (2.2, 8), (0.8, 4), (1.5, 1)]) +@pytest.mark.parametrize( + ("mem_gb", "n_procs"), [(0.5, 3), (2.2, 8), (0.8, 4), (1.5, 1)] +) def test_cmdline_profiling(tmpdir, mem_gb, n_procs, use_resource_monitor): """ Test runtime profiler correctly records workflow RAM/CPUs consumption @@ -80,7 +82,9 @@ def test_cmdline_profiling(tmpdir, mem_gb, n_procs, use_resource_monitor): @pytest.mark.skipif( True, reason="test disabled temporarily, until function profiling works" ) -@pytest.mark.parametrize(("mem_gb", "n_procs"), [(0.5, 3), (2.2, 8), (0.8, 4), (1.5, 1)]) +@pytest.mark.parametrize( + ("mem_gb", "n_procs"), [(0.5, 3), (2.2, 8), (0.8, 4), (1.5, 1)] +) def test_function_profiling(tmpdir, mem_gb, n_procs, use_resource_monitor): """ Test runtime profiler correctly records workflow RAM/CPUs consumption diff --git a/nipype/interfaces/cmtk/parcellation.py b/nipype/interfaces/cmtk/parcellation.py index 98e3166be0..d9a7ab1bda 100644 --- a/nipype/interfaces/cmtk/parcellation.py +++ b/nipype/interfaces/cmtk/parcellation.py @@ -315,15 +315,10 @@ def create_annot_label(subject_id, subjects_dir, fs_dir, parcellation_name): ) runCmd(mri_cmd, log) runCmd("mris_volmask %s" % subject_id, log) - mri_cmd = 'mri_convert -i "{}/mri/ribbon.mgz" -o "{}/mri/ribbon.nii.gz"'.format( - op.join(subjects_dir, subject_id), - op.join(subjects_dir, subject_id), - ) + subject_path = op.join(subjects_dir, subject_id) + mri_cmd = f'mri_convert 
-i "{subject_path}/mri/ribbon.mgz" -o "{subject_path}/mri/ribbon.nii.gz"' runCmd(mri_cmd, log) - mri_cmd = 'mri_convert -i "{}/mri/aseg.mgz" -o "{}/mri/aseg.nii.gz"'.format( - op.join(subjects_dir, subject_id), - op.join(subjects_dir, subject_id), - ) + mri_cmd = f'mri_convert -i "{subject_path}/mri/aseg.mgz" -o "{subject_path}/mri/aseg.nii.gz"' runCmd(mri_cmd, log) iflogger.info("[ DONE ]") diff --git a/nipype/interfaces/dtitk/base.py b/nipype/interfaces/dtitk/base.py index 6f46f8d404..50a23896f4 100644 --- a/nipype/interfaces/dtitk/base.py +++ b/nipype/interfaces/dtitk/base.py @@ -42,11 +42,10 @@ def __init__(self, *args, **kwargs): rename_idx = classes.index("DTITKRenameMixin") new_name = classes[rename_idx + 1] warnings.warn( - "The {} interface has been renamed to {}\n" + f"The {dep_name} interface has been renamed to {new_name}\n" "Please see the documentation for DTI-TK " "interfaces, as some inputs have been " - "added or renamed for clarity." - "".format(dep_name, new_name), + "added or renamed for clarity.", DeprecationWarning, ) super().__init__(*args, **kwargs) diff --git a/nipype/interfaces/freesurfer/preprocess.py b/nipype/interfaces/freesurfer/preprocess.py index 5b2fd19a0b..80fc64567c 100644 --- a/nipype/interfaces/freesurfer/preprocess.py +++ b/nipype/interfaces/freesurfer/preprocess.py @@ -729,10 +729,7 @@ def cmdline(self): outdir = self._get_outdir() cmd = [] if not os.path.exists(outdir): - cmdstr = "{} -c \"import os; os.makedirs('{}')\"".format( - op.basename(sys.executable), - outdir, - ) + cmdstr = f"{op.basename(sys.executable)} -c \"import os; os.makedirs('{outdir}')\"" cmd.extend([cmdstr]) infofile = os.path.join(outdir, "shortinfo.txt") if not os.path.exists(infofile): @@ -741,12 +738,7 @@ def cmdline(self): files = self._get_filelist(outdir) for infile, outfile in files: if not os.path.exists(outfile): - single_cmd = "{}{} {} {}".format( - self._cmd_prefix, - self.cmd, - infile, - os.path.join(outdir, outfile), - ) + single_cmd = 
f"{self._cmd_prefix}{self.cmd} {infile} {os.path.join(outdir, outfile)}" cmd.extend([single_cmd]) return "; ".join(cmd) diff --git a/nipype/interfaces/freesurfer/tests/test_preprocess.py b/nipype/interfaces/freesurfer/tests/test_preprocess.py index ebbc8c6c75..ef2755ef61 100644 --- a/nipype/interfaces/freesurfer/tests/test_preprocess.py +++ b/nipype/interfaces/freesurfer/tests/test_preprocess.py @@ -65,11 +65,7 @@ def test_fitmsparams(create_files_in_directory): # .inputs based parameters setting fit.inputs.in_files = filelist fit.inputs.out_dir = outdir - assert fit.cmdline == "mri_ms_fitparms {} {} {}".format( - filelist[0], - filelist[1], - outdir, - ) + assert fit.cmdline == f"mri_ms_fitparms {filelist[0]} {filelist[1]} {outdir}" # constructor based parameter setting fit2 = freesurfer.FitMSParams( @@ -184,9 +180,7 @@ def test_bbregister(create_files_in_directory): base, _ = os.path.splitext(base) assert bbr.cmdline == ( - "bbregister --t2 --init-fsl " - "--reg {base}_bbreg_fsaverage.dat " - "--mov {full} --s fsaverage".format(full=filelist[0], base=base) + f"bbregister --t2 --init-fsl --reg {base}_bbreg_fsaverage.dat --mov {filelist[0]} --s fsaverage" ) diff --git a/nipype/interfaces/freesurfer/tests/test_utils.py b/nipype/interfaces/freesurfer/tests/test_utils.py index 323c04166d..4dab8124be 100644 --- a/nipype/interfaces/freesurfer/tests/test_utils.py +++ b/nipype/interfaces/freesurfer/tests/test_utils.py @@ -210,10 +210,7 @@ def test_mrisexpand(tmpdir): nd_res = expand_nd.run() # Commandlines differ - node_cmdline = ( - "mris_expand -T 60 -pial {cwd}/lh.pial {cwd}/lh.smoothwm " - "1 expandtmp".format(cwd=nd_res.runtime.cwd) - ) + node_cmdline = f"mris_expand -T 60 -pial {nd_res.runtime.cwd}/lh.pial {nd_res.runtime.cwd}/lh.smoothwm 1 expandtmp" assert nd_res.runtime.cmdline == node_cmdline # Check output diff --git a/nipype/interfaces/freesurfer/utils.py b/nipype/interfaces/freesurfer/utils.py index c0abc8ca98..df55dda270 100644 --- 
a/nipype/interfaces/freesurfer/utils.py +++ b/nipype/interfaces/freesurfer/utils.py @@ -355,8 +355,7 @@ def _format_arg(self, name, spec, value): if ext != filemap[value]: if ext in filemap.values(): raise ValueError( - "Cannot create {} file with extension " - "{}".format(value, ext) + f"Cannot create {value} file with extension " f"{ext}" ) else: logger.warning( @@ -595,8 +594,7 @@ def _format_arg(self, name, spec, value): if ext != filemap[value]: if ext in filemap.values(): raise ValueError( - "Cannot create {} file with extension " - "{}".format(value, ext) + f"Cannot create {value} file with extension " f"{ext}" ) else: logger.warning( @@ -998,10 +996,8 @@ def _format_arg(self, name, spec, value): if len(value) == 2: return "-fminmax %.3f %.3f" % value else: - return "-fminmax {:.3f} {:.3f} -fmid {:.3f}".format( - value[0], - value[2], - value[1], + return ( + f"-fminmax {value[0]:.3f} {value[2]:.3f} -fmid {value[1]:.3f}" ) elif name == "annot_name" and isdefined(value): # Matching annot by name needs to strip the leading hemi and trailing @@ -1015,11 +1011,7 @@ def _format_arg(self, name, spec, value): def _run_interface(self, runtime): if not isdefined(self.inputs.screenshot_stem): - stem = "{}_{}_{}".format( - self.inputs.subject_id, - self.inputs.hemi, - self.inputs.surface, - ) + stem = f"{self.inputs.subject_id}_{self.inputs.hemi}_{self.inputs.surface}" else: stem = self.inputs.screenshot_stem stem_args = self.inputs.stem_template_args @@ -1085,11 +1077,7 @@ def _write_tcl_script(self): def _list_outputs(self): outputs = self._outputs().get() if not isdefined(self.inputs.screenshot_stem): - stem = "{}_{}_{}".format( - self.inputs.subject_id, - self.inputs.hemi, - self.inputs.surface, - ) + stem = f"{self.inputs.subject_id}_{self.inputs.hemi}_{self.inputs.surface}" else: stem = self.inputs.screenshot_stem stem_args = self.inputs.stem_template_args diff --git a/nipype/interfaces/fsl/preprocess.py b/nipype/interfaces/fsl/preprocess.py index 
8af48d2aa9..95243bf65f 100644 --- a/nipype/interfaces/fsl/preprocess.py +++ b/nipype/interfaces/fsl/preprocess.py @@ -1330,9 +1330,9 @@ def _format_arg(self, name, spec, value): if name == "out_intensitymap_file": value = self._list_outputs()[name] value = [FNIRT.intensitymap_file_basename(v) for v in value] - assert len(set(value)) == 1, "Found different basenames for {}: {}".format( - name, value - ) + assert ( + len(set(value)) == 1 + ), f"Found different basenames for {name}: {value}" return spec.argstr % value[0] if name in list(self.filemap.keys()): return spec.argstr % self._list_outputs()[name] diff --git a/nipype/interfaces/fsl/tests/test_maths.py b/nipype/interfaces/fsl/tests/test_maths.py index 189fff8b3f..3d4a21fa83 100644 --- a/nipype/interfaces/fsl/tests/test_maths.py +++ b/nipype/interfaces/fsl/tests/test_maths.py @@ -401,9 +401,7 @@ def test_binarymaths(create_files_in_directory_plus_output_type): assert maths.cmdline == f"fslmaths a.nii -{op} b.nii c.nii" else: maths.inputs.operand_value = ent - assert maths.cmdline == "fslmaths a.nii -{} {:.8f} c.nii".format( - op, ent - ) + assert maths.cmdline == f"fslmaths a.nii -{op} {ent:.8f} c.nii" # Test that we don't need to ask for an out file for op in ops: @@ -461,9 +459,7 @@ def test_tempfilt(create_files_in_directory_plus_output_type): for win in windows: filt.inputs.highpass_sigma = win[0] filt.inputs.lowpass_sigma = win[1] - assert filt.cmdline == "fslmaths a.nii -bptf {:.6f} {:.6f} b.nii".format( - win[0], win[1] - ) + assert filt.cmdline == f"fslmaths a.nii -bptf {win[0]:.6f} {win[1]:.6f} b.nii" # Test that we don't need to ask for an out file filt = fsl.TemporalFilter(in_file="a.nii", highpass_sigma=64) diff --git a/nipype/interfaces/fsl/tests/test_preprocess.py b/nipype/interfaces/fsl/tests/test_preprocess.py index 2a93916706..d109effc52 100644 --- a/nipype/interfaces/fsl/tests/test_preprocess.py +++ b/nipype/interfaces/fsl/tests/test_preprocess.py @@ -240,12 +240,7 @@ def 
test_flirt(setup_flirt): pth, fname, ext = split_filename(infile) outfile = fsl_name(flirter, "%s_flirt" % fname) outmat = "%s_flirt.mat" % fname - realcmd = "flirt -in {} -ref {} -out {} -omat {}".format( - infile, - reffile, - outfile, - outmat, - ) + realcmd = f"flirt -in {infile} -ref {reffile} -out {outfile} -omat {outmat}" assert flirter.cmdline == realcmd # test apply_xfm option @@ -433,14 +428,7 @@ def test_fnirt(setup_flirt): " --iout=%s" % (infile, log, flag, strval, reffile, iout) ) elif item in ("in_fwhm", "intensity_mapping_model"): - cmd = "fnirt --in={} {}={} --logout={} --ref={} --iout={}".format( - infile, - flag, - strval, - log, - reffile, - iout, - ) + cmd = f"fnirt --in={infile} {flag}={strval} --logout={log} --ref={reffile} --iout={iout}" elif item.startswith("apply"): cmd = ( "fnirt %s=%s " diff --git a/nipype/interfaces/fsl/tests/test_utils.py b/nipype/interfaces/fsl/tests/test_utils.py index bfe895c6ee..feffe553e3 100644 --- a/nipype/interfaces/fsl/tests/test_utils.py +++ b/nipype/interfaces/fsl/tests/test_utils.py @@ -168,10 +168,7 @@ def test_overlay(create_files_in_directory_plus_output_type): ) assert ( overlay2.cmdline - == "overlay 1 0 {} -a {} 2.50 10.00 foo2_overlay.nii".format( - filelist[1], - filelist[0], - ) + == f"overlay 1 0 {filelist[1]} -a {filelist[0]} 2.50 10.00 foo2_overlay.nii" ) @@ -199,10 +196,7 @@ def test_slicer(create_files_in_directory_plus_output_type): slicer.inputs.out_file = "foo_bar.png" assert ( slicer.cmdline - == "slicer {} {} -L -i 10.000 20.000 -A 750 foo_bar.png".format( - filelist[0], - filelist[1], - ) + == f"slicer {filelist[0]} {filelist[1]} -L -i 10.000 20.000 -A 750 foo_bar.png" ) # .run based parameter setting @@ -317,9 +311,8 @@ def test_convertxfm(create_files_in_directory_plus_output_type): cvt2 = fsl.ConvertXFM( in_file=filelist[0], in_file2=filelist[1], concat_xfm=True, out_file="bar.mat" ) - assert cvt2.cmdline == "convert_xfm -omat bar.mat -concat {} {}".format( - filelist[1], - 
filelist[0], +     assert ( +        cvt2.cmdline == f"convert_xfm -omat bar.mat -concat {filelist[1]} {filelist[0]}" ) diff --git a/nipype/interfaces/fsl/utils.py b/nipype/interfaces/fsl/utils.py index 2cf868371d..2f253465f3 100644 --- a/nipype/interfaces/fsl/utils.py +++ b/nipype/interfaces/fsl/utils.py @@ -1070,11 +1070,9 @@ def _format_arg(self, name, spec, value): else: return "1" if name == "show_negative_stats": - return "{} {:.2f} {:.2f}".format( - self.inputs.stat_image, - self.inputs.stat_thresh[0] * -1, - self.inputs.stat_thresh[1] * -1, - ) + thresh0 = self.inputs.stat_thresh[0] * -1 + thresh1 = self.inputs.stat_thresh[1] * -1 + return f"{self.inputs.stat_image} {thresh0:.2f} {thresh1:.2f}" return super()._format_arg(name, spec, value) def _list_outputs(self): @@ -1085,10 +1083,9 @@ def _list_outputs(self): not isdefined(self.inputs.show_negative_stats) or not self.inputs.show_negative_stats ): - stem = "{}_and_{}".format( - split_filename(self.inputs.stat_image)[1], - split_filename(self.inputs.stat_image2)[1], - ) + image = split_filename(self.inputs.stat_image)[1] + image2 = split_filename(self.inputs.stat_image2)[1] + stem = f"{image}_and_{image2}" else: stem = split_filename(self.inputs.stat_image)[1] out_file = self._gen_fname(stem, suffix="_overlay") @@ -1455,11 +1452,7 @@ def _format_arg(self, name, spec, value): titledict = dict(fsl="MCFLIRT", spm="Realign") unitdict = dict(rot="radians", tra="mm") - title = "'{} estimated {} ({})'".format( - titledict[source], - value, - unitdict[value[:3]], - ) + title = f"'{titledict[source]} estimated {value} ({unitdict[value[:3]]})'" return f"-t {title} {sfstr} -a x,y,z" elif name == "plot_size": @@ -2580,7 +2573,7 @@ def _coords_to_trk(self, points, out_file): def _overload_extension(self, value, name): if name == "out_file": - return "{}.{}".format(value, self._outformat) + return f"{value}.{self._outformat}" def _run_interface(self, runtime): fname = self._in_file diff --git a/nipype/interfaces/io.py 
b/nipype/interfaces/io.py index 3efb7d680e..19f964a7f2 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -135,15 +135,11 @@ def _get_head_bucket(s3_resource, bucket_name): ) raise Exception(err_msg) else: - err_msg = "Unable to connect to bucket: {}. Error message:\n{}".format( - bucket_name, - exc, + err_msg = ( + f"Unable to connect to bucket: {bucket_name}. Error message:\n{exc}" ) except Exception as exc: - err_msg = "Unable to connect to bucket: {}. Error message:\n{}".format( - bucket_name, - exc, - ) + err_msg = f"Unable to connect to bucket: {bucket_name}. Error message:\n{exc}" raise Exception(err_msg) @@ -961,10 +957,7 @@ def _list_outputs(self): if not args: filelist = [fname for fname in bkt_files if re.match(template, fname)] if len(filelist) == 0: - msg = "Output key: {} Template: {} returned no files".format( - key, - template, - ) + msg = f"Output key: {key} Template: {template} returned no files" if self.inputs.raise_on_empty: raise OSError(msg) else: @@ -1009,10 +1002,7 @@ def _list_outputs(self): if re.match(filledtemplate, fname): outfiles.append(fname) if len(outfiles) == 0: - msg = "Output key: {} Template: {} returned no files".format( - key, - filledtemplate, - ) + msg = f"Output key: {key} Template: {filledtemplate} returned no files" if self.inputs.raise_on_empty: raise OSError(msg) else: @@ -1236,10 +1226,7 @@ def _list_outputs(self): if not args: filelist = glob.glob(template) if len(filelist) == 0: - msg = "Output key: {} Template: {} returned no files".format( - key, - template, - ) + msg = f"Output key: {key} Template: {template} returned no files" if self.inputs.raise_on_empty: raise OSError(msg) else: @@ -1281,10 +1268,7 @@ def _list_outputs(self): ) outfiles = glob.glob(filledtemplate) if len(outfiles) == 0: - msg = "Output key: {} Template: {} returned no files".format( - key, - filledtemplate, - ) + msg = f"Output key: {key} Template: {filledtemplate} returned no files" if self.inputs.raise_on_empty: raise 
OSError(msg) else: @@ -1454,9 +1438,8 @@ def _list_outputs(self): # Handle the case where nothing matched if not filelist: - msg = "No files were found matching {} template: {}".format( - field, - filled_template, + msg = ( + f"No files were found matching {field} template: {filled_template}" ) if self.inputs.raise_on_empty: raise OSError(msg) diff --git a/nipype/interfaces/niftyreg/reg.py b/nipype/interfaces/niftyreg/reg.py index 2c7657e6ae..d14b94dc6e 100644 --- a/nipype/interfaces/niftyreg/reg.py +++ b/nipype/interfaces/niftyreg/reg.py @@ -397,11 +397,7 @@ def _list_outputs(self): if self.inputs.vel_flag is True and isdefined(self.inputs.aff_file): cpp_file = os.path.abspath(outputs["cpp_file"]) flo_file = os.path.abspath(self.inputs.flo_file) - outputs["avg_output"] = "{} {} {}".format( - self.inputs.aff_file, - cpp_file, - flo_file, - ) + outputs["avg_output"] = f"{self.inputs.aff_file} {cpp_file} {flo_file}" else: cpp_file = os.path.abspath(outputs["cpp_file"]) flo_file = os.path.abspath(self.inputs.flo_file) diff --git a/nipype/interfaces/nilearn.py b/nipype/interfaces/nilearn.py index 6d928cbfaf..afe8e62472 100644 --- a/nipype/interfaces/nilearn.py +++ b/nipype/interfaces/nilearn.py @@ -135,18 +135,14 @@ def _process_inputs(self): # check label list size if not np.isclose(int(n_labels), n_labels): raise ValueError( - "The label files {} contain invalid value {}. Check input.".format( - self.inputs.label_files, n_labels - ) + f"The label files {self.inputs.label_files} contain invalid value {n_labels}. Check input." 
) if len(self.inputs.class_labels) != n_labels: raise ValueError( - "The length of class_labels {} does not " - "match the number of regions {} found in " - "label_files {}".format( - self.inputs.class_labels, n_labels, self.inputs.label_files - ) + f"The length of class_labels {self.inputs.class_labels} does not " + f"match the number of regions {n_labels} found in " + f"label_files {self.inputs.label_files}" ) if self.inputs.include_global: diff --git a/nipype/interfaces/slicer/generate_classes.py b/nipype/interfaces/slicer/generate_classes.py index a36dd6b55a..acddb887f6 100644 --- a/nipype/interfaces/slicer/generate_classes.py +++ b/nipype/interfaces/slicer/generate_classes.py @@ -327,9 +327,7 @@ def generate_class( ]: if not param.getElementsByTagName("channel"): raise RuntimeError( - "Insufficient XML specification: each element of type 'file', 'directory', 'image', 'geometry', 'transform', or 'table' requires 'channel' field.\n{}".format( - traitsParams - ) + f"Insufficient XML specification: each element of type 'file', 'directory', 'image', 'geometry', 'transform', or 'table' requires 'channel' field.\n{traitsParams}" ) elif ( param.getElementsByTagName("channel")[0].firstChild.nodeValue @@ -378,9 +376,7 @@ def generate_class( ) else: raise RuntimeError( - "Insufficient XML specification: each element of type 'file', 'directory', 'image', 'geometry', 'transform', or 'table' requires 'channel' field to be in ['input','output'].\n{}".format( - traitsParams - ) + f"Insufficient XML specification: each element of type 'file', 'directory', 'image', 'geometry', 'transform', or 'table' requires 'channel' field to be in ['input','output'].\n{traitsParams}" ) else: # For all other parameter types, they are implicitly only input types inputTraits.append( diff --git a/nipype/interfaces/spm/utils.py b/nipype/interfaces/spm/utils.py index 76944893e1..bc6a06edc6 100644 --- a/nipype/interfaces/spm/utils.py +++ b/nipype/interfaces/spm/utils.py @@ -105,22 +105,17 @@ def 
_make_matlab_command(self, _): self.inputs.mat = self._make_mat_file() if not isdefined(self.inputs.invmat): self.inputs.invmat = self._make_inv_file() - script = """ - target = '{}'; - moving = '{}'; + script = f""" + target = '{self.inputs.target}'; + moving = '{self.inputs.moving}'; targetv = spm_vol(target); movingv = spm_vol(moving); x = spm_coreg(targetv, movingv); M = spm_matrix(x); - save('{}' , 'M' ); + save('{self.inputs.mat}' , 'M' ); M = inv(M); - save('{}','M') - """.format( - self.inputs.target, - self.inputs.moving, - self.inputs.mat, - self.inputs.invmat, - ) + save('{self.inputs.invmat}','M') + """ return script def _list_outputs(self): @@ -166,10 +161,10 @@ def _make_matlab_command(self, _): """checks for SPM, generates script""" outputs = self._list_outputs() self.inputs.out_file = outputs["out_file"] - script = """ - infile = '{}'; - outfile = '{}' - transform = load('{}'); + script = f""" + infile = '{self.inputs.in_file}'; + outfile = '{self.inputs.out_file}' + transform = load('{self.inputs.mat}'); V = spm_vol(infile); X = spm_read_vols(V); @@ -178,11 +173,7 @@ def _make_matlab_command(self, _): V.fname = fullfile(outfile); spm_write_vol(V,X); - """.format( - self.inputs.in_file, - self.inputs.out_file, - self.inputs.mat, - ) + """ # img_space = spm_get_space(infile); # spm_get_space(infile, transform.M * img_space); return script diff --git a/nipype/interfaces/workbench/metric.py b/nipype/interfaces/workbench/metric.py index 07e068e901..50e4300cd5 100644 --- a/nipype/interfaces/workbench/metric.py +++ b/nipype/interfaces/workbench/metric.py @@ -149,8 +149,7 @@ def _format_arg(self, opt, spec, val): if opt in ["current_area", "new_area"]: if not self.inputs.area_surfs and not self.inputs.area_metrics: raise ValueError( - "{} was set but neither area_surfs or" - " area_metrics were set".format(opt) + f"{opt} was set but neither area_surfs or area_metrics were set" ) if opt == "method": if ( diff --git a/nipype/pipeline/engine/utils.py 
b/nipype/pipeline/engine/utils.py index fa785c6bae..ae7c057b5c 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -369,13 +369,7 @@ def format_node(node, format="python", include_config=False): args = ", ".join(filled_args) klass_name = klass.__class__.__name__ if isinstance(node, MapNode): - nodedef = '{} = MapNode({}({}), iterfield={}, name="{}")'.format( - name, - klass_name, - args, - node.iterfield, - name, - ) + nodedef = f'{name} = MapNode({klass_name}({args}), iterfield={node.iterfield}, name="{name}")' else: nodedef = f'{name} = Node({klass_name}({args}), name="{name}")' lines = [importline, comment, nodedef] @@ -782,9 +776,7 @@ def _merge_graphs( rootnode = list(Gc.nodes())[nodeidx] paramstr = "" for key, val in sorted(params.items()): - paramstr = "{}_{}_{}".format( - paramstr, _get_valid_pathstr(key), _get_valid_pathstr(val) - ) + paramstr = f"{paramstr}_{_get_valid_pathstr(key)}_{_get_valid_pathstr(val)}" rootnode.set_input(key, val) logger.debug("Parameterization: paramstr=%s", paramstr) @@ -916,10 +908,8 @@ def _propagate_internal_output(graph, node, field, connections, portinputs): src_func = src_port[1].split("\\n")[0] dst_func = src[1].split("\\n")[0] raise ValueError( - "Does not support two inline functions " - "in series ('{}' and '{}'), found when " - "connecting {} to {}. Please use a Function " - "node.".format(src_func, dst_func, srcnode, destnode) + f"Does not support two inline functions in series ('{src_func}' and '{dst_func}'), " + f"found when connecting {srcnode} to {destnode}. Please use a Function node." 
) connect = graph.get_edge_data(srcnode, destnode, default={"connect": []}) diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py index 93758b0a13..7bc4faef34 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -444,8 +444,7 @@ def write_graph( if graph2use in ["hierarchical", "colored"]: if self.name[:1].isdigit(): # these graphs break if int raise ValueError( - "{} graph failed, workflow name cannot begin " - "with a number".format(graph2use) + f"{graph2use} graph failed, workflow name cannot begin with a number" ) dotfilename = op.join(base_dir, dotfilename) self.write_hierarchical_dotfile( diff --git a/nipype/pipeline/plugins/base.py b/nipype/pipeline/plugins/base.py index 1571ab71a9..f84dccb039 100644 --- a/nipype/pipeline/plugins/base.py +++ b/nipype/pipeline/plugins/base.py @@ -534,11 +534,8 @@ def _get_result(self, taskid): results_file = None try: error_message = ( - "Job id ({}) finished or terminated, but " - "results file does not exist after ({}) " - "seconds. Batch dir contains crashdump file " - "if node raised an exception.\n" - "Node working directory: ({}) ".format(taskid, timeout, node_dir) + f"Job id ({taskid}) finished or terminated, but results file does not exist after ({timeout}) seconds. 
Batch dir contains crashdump file if node raised an exception.\n" + f"Node working directory: ({node_dir}) " ) raise OSError(error_message) except OSError: diff --git a/nipype/pipeline/plugins/dagman.py b/nipype/pipeline/plugins/dagman.py index 55f3f03bee..bd8a8b05b2 100644 --- a/nipype/pipeline/plugins/dagman.py +++ b/nipype/pipeline/plugins/dagman.py @@ -147,10 +147,8 @@ def _submit_graph(self, pyfiles, dependencies, nodes): ) if wrapper_cmd is not None: specs["executable"] = wrapper_cmd - specs["nodescript"] = "{} {} {}".format( - wrapper_args % specs, # give access to variables - sys.executable, - pyscript, + specs["nodescript"] = ( + f"{wrapper_args % specs} {sys.executable} {pyscript}" ) submitspec = template % specs # write submit spec for this job diff --git a/nipype/pipeline/plugins/lsf.py b/nipype/pipeline/plugins/lsf.py index cf334be051..fea0d4267d 100644 --- a/nipype/pipeline/plugins/lsf.py +++ b/nipype/pipeline/plugins/lsf.py @@ -85,11 +85,7 @@ def _submit_batchtask(self, scriptfile, node): jobnameitems = jobname.split(".") jobnameitems.reverse() jobname = ".".join(jobnameitems) - cmd.inputs.args = "{} -J {} sh {}".format( - bsubargs, - jobname, - scriptfile, - ) # -J job_name_spec + cmd.inputs.args = f"{bsubargs} -J {jobname} sh {scriptfile}" # -J job_name_spec logger.debug("bsub " + cmd.inputs.args) oldlevel = iflogger.level iflogger.setLevel(logging.getLevelName("CRITICAL")) diff --git a/nipype/pipeline/plugins/sge.py b/nipype/pipeline/plugins/sge.py index 38079e947d..0ea15f5713 100644 --- a/nipype/pipeline/plugins/sge.py +++ b/nipype/pipeline/plugins/sge.py @@ -82,9 +82,7 @@ def is_job_state_pending(self): time_diff = time.time() - self._job_info_creation_time if self.is_zombie(): sge_debug_print( - "DONE! QJobInfo.IsPending found in 'zombie' list, returning False so claiming done!\n{}".format( - self - ) + f"DONE! 
QJobInfo.IsPending found in 'zombie' list, returning False so claiming done!\n{self}" ) is_pending_status = False # Job explicitly found as being completed! elif self.is_initializing() and (time_diff > 600): @@ -154,9 +152,7 @@ def _qacct_verified_complete(taskid): from the qacct report """ sge_debug_print( - "WARNING: " - "CONTACTING qacct for finished jobs, " - "{}: {}".format(time.time(), "Verifying Completion") + f"WARNING: CONTACTING qacct for finished jobs, {time.time()}: Verifying Completion" ) this_command = "qacct" @@ -253,10 +249,7 @@ def _parse_qstat_job_list(self, xml_job_list): self._task_dictionary[dictionary_job].set_state("zombie") else: sge_debug_print( - "ERROR: Job not in current parselist, " - "and not in done list {}: {}".format( - dictionary_job, self._task_dictionary[dictionary_job] - ) + f"ERROR: Job not in current parselist, and not in done list {dictionary_job}: {self._task_dictionary[dictionary_job]}" ) if self._task_dictionary[dictionary_job].is_initializing(): is_completed = self._qacct_verified_complete(dictionary_job) @@ -264,10 +257,7 @@ def _parse_qstat_job_list(self, xml_job_list): self._task_dictionary[dictionary_job].set_state("zombie") else: sge_debug_print( - "ERROR: Job not in still in initialization mode, " - "and not in done list {}: {}".format( - dictionary_job, self._task_dictionary[dictionary_job] - ) + f"ERROR: Job not in still in initialization mode, and not in done list {dictionary_job}: {self._task_dictionary[dictionary_job]}" ) def _run_qstat(self, reason_for_qstat, force_instant=True): @@ -279,8 +269,7 @@ def _run_qstat(self, reason_for_qstat, force_instant=True): -s s suspended jobs """ sge_debug_print( - "WARNING: CONTACTING qmaster for jobs, " - "{}: {}".format(time.time(), reason_for_qstat) + f"WARNING: CONTACTING qmaster for jobs, {time.time()}: {reason_for_qstat}" ) if force_instant: this_command = self._qstat_instant_executable @@ -311,10 +300,7 @@ def _run_qstat(self, reason_for_qstat, force_instant=True): 
self._parse_qstat_job_list(runjobs) break except Exception as inst: - exception_message = "QstatParsingError:\n\t{}\n\t{}\n".format( - type(inst), # the exception instance - inst, # __str__ allows args to printed directly - ) + exception_message = f"QstatParsingError:\n\t{type(inst)}\n\t{inst}\n" sge_debug_print(exception_message) time.sleep(5) @@ -340,8 +326,7 @@ def is_job_pending(self, task_id): job_is_pending = self._task_dictionary[task_id].is_job_state_pending() else: sge_debug_print( - "ERROR: Job {} not in task list, " - "even after forced qstat!".format(task_id) + f"ERROR: Job {task_id} not in task list, even after forced qstat!" ) job_is_pending = False if not job_is_pending: @@ -352,8 +337,7 @@ def is_job_pending(self, task_id): self._task_dictionary.pop(task_id) else: sge_debug_print( - "ERROR: Job {} not in task list, " - "but attempted to be removed!".format(task_id) + f"ERROR: Job {task_id} not in task list, but attempted to be removed!" ) return job_is_pending diff --git a/nipype/pipeline/plugins/sgegraph.py b/nipype/pipeline/plugins/sgegraph.py index 3d9e8ac40d..17aa514f85 100644 --- a/nipype/pipeline/plugins/sgegraph.py +++ b/nipype/pipeline/plugins/sgegraph.py @@ -144,14 +144,7 @@ def make_job_name(jobnumber, nodeslist): stdoutFile = "" if self._qsub_args.count("-o ") == 0: stdoutFile = f"-o {batchscriptoutfile}" - full_line = "{jobNm}=$(qsub {outFileOption} {errFileOption} {extraQSubArgs} {dependantIndex} -N {jobNm} {batchscript} | awk '/^Your job/{{print $3}}')\n".format( - jobNm=jobname, - outFileOption=stdoutFile, - errFileOption=stderrFile, - extraQSubArgs=qsub_args, - dependantIndex=deps, - batchscript=batchscriptfile, - ) + full_line = f"{jobname}=$(qsub {stdoutFile} {stderrFile} {qsub_args} {deps} -N {jobname} {batchscriptfile} | awk '/^Your job/{{print $3}}')\n" fp.writelines(full_line) cmd = CommandLine( "bash", diff --git a/nipype/pipeline/plugins/slurmgraph.py b/nipype/pipeline/plugins/slurmgraph.py index 0999595f5d..5ed5701acb 100644 
--- a/nipype/pipeline/plugins/slurmgraph.py +++ b/nipype/pipeline/plugins/slurmgraph.py @@ -144,14 +144,7 @@ def make_job_name(jobnumber, nodeslist): stdoutFile = "" if self._sbatch_args.count("-o ") == 0: stdoutFile = f"-o {batchscriptoutfile}" - full_line = "{jobNm}=$(sbatch {outFileOption} {errFileOption} {extraSBatchArgs} {dependantIndex} -J {jobNm} {batchscript} | awk '/^Submitted/ {{print $4}}')\n".format( - jobNm=jobname, - outFileOption=stdoutFile, - errFileOption=stderrFile, - extraSBatchArgs=sbatch_args, - dependantIndex=deps, - batchscript=batchscriptfile, - ) + full_line = f"{jobname}=$(sbatch {stdoutFile} {stderrFile} {sbatch_args} {deps} -J {jobname} {batchscriptfile} | awk '/^Submitted/ {{print $4}}')\n" fp.writelines(full_line) cmd = CommandLine( "bash", diff --git a/nipype/pipeline/plugins/tools.py b/nipype/pipeline/plugins/tools.py index bce3eb82da..ae0082d2ed 100644 --- a/nipype/pipeline/plugins/tools.py +++ b/nipype/pipeline/plugins/tools.py @@ -32,12 +32,10 @@ def report_crash(node, traceback=None, hostname=None): keepends=True ) except Exception as exc: - traceback += """ + traceback += f""" During the creation of this crashfile triggered by the above exception, -another exception occurred:\n\n{}.""".format( - exc - ).splitlines( +another exception occurred:\n\n{exc}.""".splitlines( keepends=True ) else: diff --git a/nipype/scripts/utils.py b/nipype/scripts/utils.py index 8d8dc52627..c77674b225 100644 --- a/nipype/scripts/utils.py +++ b/nipype/scripts/utils.py @@ -100,10 +100,7 @@ def add_args_options(arg_parser, interface): if has_multiple_inner_traits: raise NotImplementedError( - "This interface cannot be used. via the" - " command line as multiple inner traits" - " are currently not supported for mandatory" - " argument: {}.".format(name) + f"This interface cannot be used via the command line, as multiple inner traits are currently not supported for mandatory argument: {name}." 
) arg_parser.add_argument(name, help=desc, **args) else: diff --git a/nipype/sphinxext/apidoc/docstring.py b/nipype/sphinxext/apidoc/docstring.py index f235a86d7e..25547ff726 100644 --- a/nipype/sphinxext/apidoc/docstring.py +++ b/nipype/sphinxext/apidoc/docstring.py @@ -138,15 +138,11 @@ def _parse_spec(inputs, name, spec): pos = spec.position if pos is None: desc_lines += [ - """Maps to a command-line argument: :code:`{arg}`.""".format( - arg=argstr.strip() - ) + f"""Maps to a command-line argument: :code:`{argstr.strip()}`.""" ] else: desc_lines += [ - """Maps to a command-line argument: :code:`{arg}` (position: {pos}).""".format( - arg=argstr.strip(), pos=pos - ) + f"""Maps to a command-line argument: :code:`{argstr.strip()}` (position: {pos}).""" ] xor = spec.xor diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index fea9ea25a7..2cc5182902 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -590,11 +590,7 @@ def loadpkl(infile): fmlogger.debug(f"'{infile}' missing; waiting 2s") sleep(2) if timed_out: - error_message = ( - "Result file {} expected, but " - "does not exist after ({}) " - "seconds.".format(infile, timeout) - ) + error_message = f"Result file {infile} expected, but does not exist after {timeout} seconds." 
raise OSError(error_message) with pklopen(str(infile), "rb") as pkl_file: diff --git a/nipype/utils/tests/test_filemanip.py b/nipype/utils/tests/test_filemanip.py index 11eb7b3d07..895fef2d1e 100644 --- a/nipype/utils/tests/test_filemanip.py +++ b/nipype/utils/tests/test_filemanip.py @@ -207,8 +207,8 @@ def test_recopy(_temp_analyze_files): img_stat = _ignore_atime(os.stat(new_img)) hdr_stat = _ignore_atime(os.stat(new_hdr)) copyfile(orig_img, new_img, **kwargs) - err_msg = "Regular - OS: {}; Copy: {}; Hardlink: {}".format( - os.name, copy, use_hardlink + err_msg = ( + f"Regular - OS: {os.name}; Copy: {copy}; Hardlink: {use_hardlink}" ) assert img_stat == _ignore_atime(os.stat(new_img)), err_msg assert hdr_stat == _ignore_atime(os.stat(new_hdr)), err_msg @@ -219,8 +219,8 @@ def test_recopy(_temp_analyze_files): img_stat = _ignore_atime(os.stat(new_img)) hdr_stat = _ignore_atime(os.stat(new_hdr)) copyfile(img_link, new_img, **kwargs) - err_msg = "Symlink - OS: {}; Copy: {}; Hardlink: {}".format( - os.name, copy, use_hardlink + err_msg = ( + f"Symlink - OS: {os.name}; Copy: {copy}; Hardlink: {use_hardlink}" ) assert img_stat == _ignore_atime(os.stat(new_img)), err_msg assert hdr_stat == _ignore_atime(os.stat(new_hdr)), err_msg diff --git a/tools/checkspecs.py b/tools/checkspecs.py index 4226f3a4f8..c1ef27ff6a 100644 --- a/tools/checkspecs.py +++ b/tools/checkspecs.py @@ -257,10 +257,7 @@ def test_specs(self, uri): for key, value in sorted(trait.__dict__.items()): if key in in_built or key == "desc": continue - input_fields += "{}={},\n ".format( - key, - self._normalize_repr(value), - ) + input_fields += f"{key}={self._normalize_repr(value)},\n " input_fields += "),\n " cmd += [" input_map = dict(%s)" % input_fields] cmd += [" inputs = %s.input_spec()" % c] @@ -348,10 +345,7 @@ def test_specs(self, uri): for key, value in sorted(trait.__dict__.items()): if key in in_built or key == "desc": continue - input_fields += "{}={},\n ".format( - key, - 
self._normalize_repr(value), - ) + input_fields += f"{key}={self._normalize_repr(value)},\n " input_fields += "),\n " cmd += [" output_map = dict(%s)" % input_fields] cmd += [" outputs = %s.output_spec()" % c]