diff --git a/lib/galaxy/tools/actions/__init__.py b/lib/galaxy/tools/actions/__init__.py
index 0cf24c0c3069..807addfdfbd6 100644
--- a/lib/galaxy/tools/actions/__init__.py
+++ b/lib/galaxy/tools/actions/__init__.py
@@ -50,7 +50,7 @@
 )
 from galaxy.tools.execution_helpers import (
     filter_output,
-    on_text_for_names,
+    on_text_for_dataset_and_collections,
     ToolExecutionCache,
 )
 from galaxy.tools.parameters import update_dataset_ids
@@ -885,6 +885,7 @@ def _wrapped_params(self, trans, tool, incoming, input_datasets=None):
 
     def _get_on_text(self, inp_data, inp_dataset_collections):
         input_names = []
+        collection_names = []
         collection_hda_ids = set()  # output collection id and store included hda ids (to avoid extra inclusion in the list of datasets)
         # two for loops because:
@@ -893,15 +894,15 @@ def _get_on_text(self, inp_data, inp_dataset_collections):
         for collections in inp_dataset_collections.values():
             for dataset_collection, _ in collections:
                 if getattr(dataset_collection, "hid", None):
-                    input_names.append(f"collection {dataset_collection.hid}")
+                    collection_names.append(f"{dataset_collection.hid}")
                     for element in dataset_collection.collection.elements:
                         collection_hda_ids.add(element.hda_id)
         for data in reversed(list(inp_data.values())):
             if data.id in collection_hda_ids:
                 continue
             if getattr(data, "hid", None):
-                input_names.append(f"data {data.hid}")
-        return on_text_for_names(input_names)
+                input_names.append(f"{data.hid}")
+        return on_text_for_dataset_and_collections(dataset_names=input_names, collection_names=collection_names)
 
     def _new_job_for_session(self, trans, tool, history) -> Tuple[model.Job, Optional[model.GalaxySession]]:
         job = trans.app.model.Job()
diff --git a/lib/galaxy/tools/execute.py b/lib/galaxy/tools/execute.py
index 31369c948052..2d449581f340 100644
--- a/lib/galaxy/tools/execute.py
+++ b/lib/galaxy/tools/execute.py
@@ -33,7 +33,7 @@
 from galaxy.tool_util.parser import ToolOutputCollectionPart
 from galaxy.tools.execution_helpers import (
     filter_output,
-    on_text_for_names,
+    on_text_for_dataset_and_collections,
     ToolExecutionCache,
 )
 from galaxy.tools.parameters.workflow_utils import is_runtime_value
@@ -325,8 +325,8 @@ def record_error(self, error):
     def on_text(self) -> Optional[str]:
         collection_info = self.collection_info
         if self._on_text is None and collection_info is not None:
-            collection_names = ["collection %d" % c.hid for c in collection_info.collections.values()]
-            self._on_text = on_text_for_names(collection_names)
+            collection_names = ["%d" % c.hid for c in collection_info.collections.values()]
+            self._on_text = on_text_for_dataset_and_collections(collection_names=collection_names)
         return self._on_text
diff --git a/lib/galaxy/tools/execution_helpers.py b/lib/galaxy/tools/execution_helpers.py
index 76413a5f6370..2d58f6bc0840 100644
--- a/lib/galaxy/tools/execution_helpers.py
+++ b/lib/galaxy/tools/execution_helpers.py
@@ -48,7 +48,7 @@ def filter_output(tool, output, incoming):
         return False
 
 
-def on_text_for_names(input_names: Collection[str]) -> str:
+def on_text_for_names(input_names: Collection[str], prefix: str) -> str:
     # input_names may contain duplicates... this is because the first value in
     # multiple input dataset parameters will appear twice once as param_name
     # and once as param_name1.
@@ -62,11 +62,16 @@ def on_text_for_names(input_names: Collection[str]) -> str:
     if len(input_names) == 0:
         on_text = ""
     elif len(input_names) == 1:
-        on_text = input_names[0]
+        on_text = prefix + " " + input_names[0]
     elif len(input_names) == 2:
-        on_text = "{} and {}".format(*input_names)
+        on_text = prefix + "s {} and {}".format(*input_names)
     elif len(input_names) == 3:
-        on_text = "{}, {}, and {}".format(*input_names)
+        on_text = prefix + "s {}, {}, and {}".format(*input_names)
     else:
-        on_text = "{}, {}, and others".format(*input_names[:2])
+        on_text = prefix + "s {}, {}, and others".format(*input_names[:2])
     return on_text
+
+
+def on_text_for_dataset_and_collections(dataset_names: Collection[str] = (), collection_names: Collection[str] = ()) -> str:
+    on_texts = [on_text_for_names(collection_names, "collection"), on_text_for_names(dataset_names, "data")]
+    return " and ".join(t for t in on_texts if t)
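
For reference, a rough sketch of the strings the patched helper would produce, assuming the patched `galaxy.tools.execution_helpers` module above is importable; the hids used here are made-up examples, not taken from the change itself:

```python
from galaxy.tools.execution_helpers import on_text_for_dataset_and_collections

# Collections only, as in the execute.py call site.
print(on_text_for_dataset_and_collections(collection_names=["3", "4"]))
# expected: "collections 3 and 4"

# A collection plus a dataset that is not already covered by a collection,
# as in the actions/__init__.py call site.
print(on_text_for_dataset_and_collections(dataset_names=["7"], collection_names=["3"]))
# expected: "collection 3 and data 7"
```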