diff --git a/core/storymanager/rank/rank.py b/core/storymanager/rank/rank.py
index 5a558692..ac985c88 100644
--- a/core/storymanager/rank/rank.py
+++ b/core/storymanager/rank/rank.py
@@ -138,7 +138,6 @@ def _sort_all_df(self, all_df, all_metric_names):
             if metric_name not in all_metric_names:
                 continue
 
-            print(metric_name)
             sort_metric_list.append(metric_name)
             is_ascend_list.append(ele.get(metric_name) == "ascend")
 
@@ -234,15 +233,12 @@ def _draw_pictures(self, test_cases, test_results):
             out_put = test_case.output_dir
             test_result = test_results[test_case.id][0]
             matrix = test_result.get("Matrix")
-            # print(out_put)
             for key in matrix.keys():
                 draw_heatmap_picture(out_put, key, matrix[key])
 
     def _prepare(self, test_cases, test_results, output_dir):
         all_metric_names = self._get_all_metric_names(test_results)
-        print(f"in_prepare all_metric_names: {all_metric_names}")
         all_hps_names = self._get_all_hps_names(test_cases)
-        print(f"in_prepare all_hps_names: {all_hps_names}")
         all_module_types = self._get_all_module_types(test_cases)
         self.all_df_header = [
             "algorithm", *all_metric_names,
diff --git a/core/testcasecontroller/metrics/metrics.py b/core/testcasecontroller/metrics/metrics.py
index 8d3f9f52..fa348a5a 100644
--- a/core/testcasecontroller/metrics/metrics.py
+++ b/core/testcasecontroller/metrics/metrics.py
@@ -147,7 +147,6 @@ def forget_rate_func(system_metric_info: dict):
     """
     info = system_metric_info.get(SystemMetricType.FORGET_RATE.value)
     forget_rate = np.mean(info)
-    print(f"forget_rate: {forget_rate}")
     return round(forget_rate, 3)
 
 
@@ -169,7 +168,6 @@ def get_metric_func(metric_dict: dict):
 
     name = metric_dict.get("name")
     url = metric_dict.get("url")
-    print(f"get metric func: name={name}, url={url}")
     if url:
         try:
             load_module(url)
diff --git a/core/testcasecontroller/testcase/testcase.py b/core/testcasecontroller/testcase/testcase.py
index 11622aef..b9e20154 100644
--- a/core/testcasecontroller/testcase/testcase.py
+++ b/core/testcasecontroller/testcase/testcase.py
@@ -66,7 +66,6 @@ def run(self, workspace):
             test_env_config = {}
             # pylint: disable=C0103
             for k, v in self.test_env.__dict__.items():
-                print(k,v)
                 test_env_config[k] = v
 
             self.output_dir = self._get_output_dir(workspace)
@@ -112,7 +111,6 @@ def compute_metrics(self, paradigm_result, dataset, **kwargs):
         metric_res = {}
         system_metric_types = [e.value for e in SystemMetricType.__members__.values()]
         for metric_name, metric_func in metric_funcs.items():
-            #print(metric_name)
             if metric_name in system_metric_types:
                 metric_res[metric_name] = metric_func(kwargs)
             else:
diff --git a/core/testenvmanager/dataset/dataset.py b/core/testenvmanager/dataset/dataset.py
index e07f5601..f04cff8a 100644
--- a/core/testenvmanager/dataset/dataset.py
+++ b/core/testenvmanager/dataset/dataset.py
@@ -523,7 +523,6 @@ def load_data(
 
         if data_format == DatasetFormat.TXT.value:
             data = TxtDataParse(data_type=data_type, func=feature_process)
-            # print(file)
             data.parse(file, use_raw=use_raw)
 
         if data_format == DatasetFormat.JSON.value: