From 52a31b871b5fef8aa13ecf2469032d0ba58f8395 Mon Sep 17 00:00:00 2001 From: iLeGend <824040212@qq.com> Date: Wed, 22 Mar 2023 11:39:38 +0800 Subject: [PATCH] [CodeStyle][UP018] Unnecessary call to `str` (#51922) --- pyproject.toml | 1 + .../auto_parallel/operators/common.py | 8 ++------ .../operators/dist_check_finite_and_unscale.py | 2 +- .../passes/auto_parallel_grad_clip.py | 2 +- .../distributed/passes/auto_parallel_sharding.py | 14 ++++++-------- .../fluid/tests/unittests/check_nan_inf_base.py | 4 ++-- .../unittests/check_nan_inf_base_dygraph.py | 4 ++-- .../unittests/collective/test_gen_nccl_id_op.py | 2 +- .../mkldnn/check_flags_mkldnn_ops_on_off.py | 2 +- .../unittests/mkldnn/check_flags_use_mkldnn.py | 2 +- .../mkldnn/test_flags_mkldnn_ops_on_off.py | 16 ++++++++-------- .../unittests/mkldnn/test_flags_use_mkldnn.py | 6 +++--- .../paddle/fluid/tests/unittests/test_nan_inf.py | 8 +++----- .../tests/unittests/xpu/test_gen_bkcl_id_op.py | 2 +- python/paddle/incubate/passes/ir.py | 2 +- python/paddle/static/io.py | 2 +- 16 files changed, 35 insertions(+), 42 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 2851cb60636ab..1c69d3b07c468 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,6 +49,7 @@ select = [ "UP013", "UP014", "UP017", + "UP018", "UP019", "UP020", "UP021", diff --git a/python/paddle/distributed/auto_parallel/operators/common.py b/python/paddle/distributed/auto_parallel/operators/common.py index f7ac557311f25..63b58df02c89a 100644 --- a/python/paddle/distributed/auto_parallel/operators/common.py +++ b/python/paddle/distributed/auto_parallel/operators/common.py @@ -440,9 +440,7 @@ def sync_and_scale_gradients(dist_ctx, op, dp_group, allreduce_var_names): OP_ROLE_KEY: OpRole.Backward, }, ) - allreduce_op._set_attr( - 'op_namescope', str('/') + ParallelMode.DataParallel - ) + allreduce_op._set_attr('op_namescope', '/' + ParallelMode.DataParallel) added_ops.append(allreduce_op) if dist_ctx.gradient_scale: @@ -452,9 +450,7 @@ def sync_and_scale_gradients(dist_ctx, op, dp_group, allreduce_var_names): outputs={'Out': grad_var}, attrs={'scale': 1.0 / dp_degree, OP_ROLE_KEY: OpRole.Backward}, ) - scale_op._set_attr( - 'op_namescope', str('/') + ParallelMode.DataParallel - ) + scale_op._set_attr('op_namescope', '/' + ParallelMode.DataParallel) added_ops.append(scale_op) dims_mapping = op_dist_attr.get_output_dims_mapping(grad_var.name) diff --git a/python/paddle/distributed/auto_parallel/operators/dist_check_finite_and_unscale.py b/python/paddle/distributed/auto_parallel/operators/dist_check_finite_and_unscale.py index 5fa88e55e94b1..2327793e459b3 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_check_finite_and_unscale.py +++ b/python/paddle/distributed/auto_parallel/operators/dist_check_finite_and_unscale.py @@ -169,7 +169,7 @@ def backward(ctx, *args, **kwargs): OP_ROLE_KEY: OpRole.Optimize, }, ) - allreduce_op._set_attr('op_namescope', str('/') + SyncMode.AmpFlagSync) + allreduce_op._set_attr('op_namescope', '/' + SyncMode.AmpFlagSync) cast_op2 = main_block.append_op( type='cast', inputs={'X': inf_var_int32}, diff --git a/python/paddle/distributed/passes/auto_parallel_grad_clip.py b/python/paddle/distributed/passes/auto_parallel_grad_clip.py index 5a0dd9c5e39e1..525420422af68 100644 --- a/python/paddle/distributed/passes/auto_parallel_grad_clip.py +++ b/python/paddle/distributed/passes/auto_parallel_grad_clip.py @@ -456,7 +456,7 @@ def _remove_no_need_ops_vars(self, block): ) # TODO better regular the usage of op namescope 
allreduce_op._set_attr( - 'op_namescope', str('/') + SyncMode.GlobalNormSync + 'op_namescope', '/' + SyncMode.GlobalNormSync ) self.clip_helper._init_dist_attr(allreduce_op) diff --git a/python/paddle/distributed/passes/auto_parallel_sharding.py b/python/paddle/distributed/passes/auto_parallel_sharding.py index 93ce3e9898f9e..eba71a86f576e 100644 --- a/python/paddle/distributed/passes/auto_parallel_sharding.py +++ b/python/paddle/distributed/passes/auto_parallel_sharding.py @@ -492,7 +492,7 @@ def _insert_optimizer_broadcasts(self, main_block, startup_block): }, ) new_op._set_attr( - 'op_namescope', str('/') + ParallelMode.DataParallel + 'op_namescope', '/' + ParallelMode.DataParallel ) param_dist_attr = ( self._dist_context.get_tensor_dist_attr_for_program(param) @@ -545,7 +545,7 @@ def _shard_gradient_synchronization(self, main_block): else: op._set_attr("ring_id", self.outer_dp_group.id) op._set_attr( - 'op_namescope', str('/') + ParallelMode.DataParallel + 'op_namescope', '/' + ParallelMode.DataParallel ) # NOTE: @@ -843,9 +843,7 @@ def _fuse_overlap_parameter_comm_stage_two(self, sharding_info): }, ) self.op_to_stream_idx[new_op] = comm_stream_idx - new_op._set_attr( - 'op_namescope', str('/') + ParallelMode.DataParallel - ) + new_op._set_attr('op_namescope', '/' + ParallelMode.DataParallel) if self.enable_overlap: new_op.dist_attr.execution_stream = comm_stream new_op.dist_attr.scheduling_priority = ( @@ -1374,7 +1372,7 @@ def _overlap_grad_comm( }, ) new_op._set_attr( - 'op_namescope', str('/') + ParallelMode.DataParallel + 'op_namescope', '/' + ParallelMode.DataParallel ) if self.enable_overlap: @@ -1424,7 +1422,7 @@ def _insert_init_and_broadcast_op( OP_ROLE_KEY: op_role, }, ) - new_op._set_attr('op_namescope', str('/') + ParallelMode.DataParallel) + new_op._set_attr('op_namescope', '/' + ParallelMode.DataParallel) naive_set_dist_op_attr_for_program_by_mesh_and_mapping( new_op, broadcast_var_dist_attr.process_mesh, @@ -1484,7 +1482,7 @@ def _insert_reduce_op( naive_set_dist_op_attr_for_program_by_mesh_and_mapping( new_op, dist_attr.process_mesh, dist_attr.dims_mapping, dist_context ) - new_op._set_attr('op_namescope', str('/') + ParallelMode.DataParallel) + new_op._set_attr('op_namescope', '/' + ParallelMode.DataParallel) return new_op diff --git a/python/paddle/fluid/tests/unittests/check_nan_inf_base.py b/python/paddle/fluid/tests/unittests/check_nan_inf_base.py index 71c346c9403a7..38f6089ee084c 100644 --- a/python/paddle/fluid/tests/unittests/check_nan_inf_base.py +++ b/python/paddle/fluid/tests/unittests/check_nan_inf_base.py @@ -16,8 +16,8 @@ import numpy as np -os.environ[str("FLAGS_check_nan_inf")] = str("1") -os.environ[str("GLOG_vmodule")] = str("nan_inf_utils_detail=10") +os.environ["FLAGS_check_nan_inf"] = "1" +os.environ["GLOG_vmodule"] = "nan_inf_utils_detail=10" import paddle import paddle.fluid as fluid diff --git a/python/paddle/fluid/tests/unittests/check_nan_inf_base_dygraph.py b/python/paddle/fluid/tests/unittests/check_nan_inf_base_dygraph.py index ace6679096725..6f1b35d4a9716 100644 --- a/python/paddle/fluid/tests/unittests/check_nan_inf_base_dygraph.py +++ b/python/paddle/fluid/tests/unittests/check_nan_inf_base_dygraph.py @@ -16,8 +16,8 @@ import numpy as np -os.environ[str("FLAGS_check_nan_inf")] = str("1") -os.environ[str("GLOG_vmodule")] = str("nan_inf_utils_detail=10") +os.environ["FLAGS_check_nan_inf"] = "1" +os.environ["GLOG_vmodule"] = "nan_inf_utils_detail=10" import paddle import paddle.nn as nn diff --git 
a/python/paddle/fluid/tests/unittests/collective/test_gen_nccl_id_op.py b/python/paddle/fluid/tests/unittests/collective/test_gen_nccl_id_op.py index 0fe790615f89b..de761ea93597b 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_gen_nccl_id_op.py +++ b/python/paddle/fluid/tests/unittests/collective/test_gen_nccl_id_op.py @@ -18,7 +18,7 @@ from launch_function_helper import _find_free_port, wait -os.environ['GLOG_vmodule'] = str("gen_nccl_id_op*=10,gen_comm_id*=10") +os.environ['GLOG_vmodule'] = "gen_nccl_id_op*=10,gen_comm_id*=10" import paddle from paddle.fluid import core diff --git a/python/paddle/fluid/tests/unittests/mkldnn/check_flags_mkldnn_ops_on_off.py b/python/paddle/fluid/tests/unittests/mkldnn/check_flags_mkldnn_ops_on_off.py index 7a006e3627c4d..511b0c13e859b 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/check_flags_mkldnn_ops_on_off.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/check_flags_mkldnn_ops_on_off.py @@ -42,7 +42,7 @@ def check(): ) a_np = np.random.uniform(-2, 2, (10, 20, 30)).astype(np.float32) b_np = np.random.uniform(-5, 5, (10, 20, 30)).astype(np.float32) - helper = LayerHelper(fluid.unique_name.generate(str("test")), act="relu") + helper = LayerHelper(fluid.unique_name.generate("test"), act="relu") func = helper.append_activation with fluid.dygraph.guard(fluid.core.CPUPlace()): a = fluid.dygraph.to_variable(a_np) diff --git a/python/paddle/fluid/tests/unittests/mkldnn/check_flags_use_mkldnn.py b/python/paddle/fluid/tests/unittests/mkldnn/check_flags_use_mkldnn.py index 90e6752dbf4de..c26263755bb64 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/check_flags_use_mkldnn.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/check_flags_use_mkldnn.py @@ -32,7 +32,7 @@ def check(): ) print("check: DNNL_VERBOSE=", os.environ['DNNL_VERBOSE']) a_np = np.random.uniform(-2, 2, (10, 20, 30)).astype(np.float32) - helper = LayerHelper(fluid.unique_name.generate(str("test")), act="relu") + helper = LayerHelper(fluid.unique_name.generate("test"), act="relu") func = helper.append_activation with fluid.dygraph.guard(fluid.core.CPUPlace()): a = fluid.dygraph.to_variable(a_np) diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_flags_mkldnn_ops_on_off.py b/python/paddle/fluid/tests/unittests/mkldnn/test_flags_mkldnn_ops_on_off.py index 1c406084105e6..ebc4dee6fffd4 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_flags_mkldnn_ops_on_off.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_flags_mkldnn_ops_on_off.py @@ -25,8 +25,8 @@ def setUp(self): self._python_interp += " check_flags_mkldnn_ops_on_off.py" self.env = os.environ.copy() - self.env[str("DNNL_VERBOSE")] = str("1") - self.env[str("FLAGS_use_mkldnn")] = str("1") + self.env["DNNL_VERBOSE"] = "1" + self.env["FLAGS_use_mkldnn"] = "1" self.relu_regex = b"^onednn_verbose,exec,cpu,eltwise,.+alg:eltwise_relu alpha:0 beta:0,10x20x20" self.ew_add_regex = ( @@ -73,28 +73,28 @@ def test_flags_use_mkl_dnn_on_empty_off_empty(self): assert self.found(self.matmul_regex, out, err) def test_flags_use_mkl_dnn_on(self): - env = {str("FLAGS_tracer_mkldnn_ops_on"): str("relu")} + env = {"FLAGS_tracer_mkldnn_ops_on": "relu"} out, err = self.flags_use_mkl_dnn_common(env) assert self.found(self.relu_regex, out, err) assert self.not_found(self.ew_add_regex, out, err) assert self.not_found(self.matmul_regex, out, err) def test_flags_use_mkl_dnn_on_multiple(self): - env = {str("FLAGS_tracer_mkldnn_ops_on"): str("relu,elementwise_add")} + env = {"FLAGS_tracer_mkldnn_ops_on": 
"relu,elementwise_add"} out, err = self.flags_use_mkl_dnn_common(env) assert self.found(self.relu_regex, out, err) assert self.found(self.ew_add_regex, out, err) assert self.not_found(self.matmul_regex, out, err) def test_flags_use_mkl_dnn_off(self): - env = {str("FLAGS_tracer_mkldnn_ops_off"): str("matmul_v2")} + env = {"FLAGS_tracer_mkldnn_ops_off": "matmul_v2"} out, err = self.flags_use_mkl_dnn_common(env) assert self.found(self.relu_regex, out, err) assert self.found(self.ew_add_regex, out, err) assert self.not_found(self.matmul_regex, out, err) def test_flags_use_mkl_dnn_off_multiple(self): - env = {str("FLAGS_tracer_mkldnn_ops_off"): str("matmul_v2,relu")} + env = {"FLAGS_tracer_mkldnn_ops_off": "matmul_v2,relu"} out, err = self.flags_use_mkl_dnn_common(env) assert self.not_found(self.relu_regex, out, err) assert self.found(self.ew_add_regex, out, err) @@ -102,8 +102,8 @@ def test_flags_use_mkl_dnn_off_multiple(self): def test_flags_use_mkl_dnn_on_off(self): env = { - str("FLAGS_tracer_mkldnn_ops_on"): str("elementwise_add"), - str("FLAGS_tracer_mkldnn_ops_off"): str("matmul_v2"), + "FLAGS_tracer_mkldnn_ops_on": "elementwise_add", + "FLAGS_tracer_mkldnn_ops_off": "matmul_v2", } out, err = self.flags_use_mkl_dnn_common(env) assert self.not_found(self.relu_regex, out, err) diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_flags_use_mkldnn.py b/python/paddle/fluid/tests/unittests/mkldnn/test_flags_use_mkldnn.py index c8e3fe6b238a4..67274afd327bd 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_flags_use_mkldnn.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_flags_use_mkldnn.py @@ -25,9 +25,9 @@ def setUp(self): self._python_interp += " check_flags_use_mkldnn.py" self.env = os.environ.copy() - self.env[str("GLOG_v")] = str("1") - self.env[str("DNNL_VERBOSE")] = str("1") - self.env[str("FLAGS_use_mkldnn")] = str("1") + self.env["GLOG_v"] = "1" + self.env["DNNL_VERBOSE"] = "1" + self.env["FLAGS_use_mkldnn"] = "1" self.relu_regex = b"^onednn_verbose,exec,cpu,eltwise,.+alg:eltwise_relu alpha:0 beta:0,10x20x30" diff --git a/python/paddle/fluid/tests/unittests/test_nan_inf.py b/python/paddle/fluid/tests/unittests/test_nan_inf.py index 0176727cb9984..4f0373c36894b 100644 --- a/python/paddle/fluid/tests/unittests/test_nan_inf.py +++ b/python/paddle/fluid/tests/unittests/test_nan_inf.py @@ -63,11 +63,9 @@ def setUp(self): super().setUp() # windows python have some bug with env, so need use str to pass ci # otherwise, "TypeError: environment can only contain strings" - self.env[str("PADDLE_INF_NAN_SKIP_OP")] = str("mul") - self.env[str("PADDLE_INF_NAN_SKIP_ROLE")] = str("loss") - self.env[str("PADDLE_INF_NAN_SKIP_VAR")] = str( - "elementwise_add:fc_0.tmp_1" - ) + self.env["PADDLE_INF_NAN_SKIP_OP"] = "mul" + self.env["PADDLE_INF_NAN_SKIP_ROLE"] = "loss" + self.env["PADDLE_INF_NAN_SKIP_VAR"] = "elementwise_add:fc_0.tmp_1" class TestNanInfCheckResult(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/xpu/test_gen_bkcl_id_op.py b/python/paddle/fluid/tests/unittests/xpu/test_gen_bkcl_id_op.py index 883476300a2b8..6493825901803 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_gen_bkcl_id_op.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_gen_bkcl_id_op.py @@ -21,7 +21,7 @@ from launch_function_helper import _find_free_port, wait -os.environ['GLOG_vmodule'] = str("gen_bkcl_id_op*=10,gen_comm_id*=10") +os.environ['GLOG_vmodule'] = "gen_bkcl_id_op*=10,gen_comm_id*=10" import paddle from paddle.fluid import core diff --git 
a/python/paddle/incubate/passes/ir.py b/python/paddle/incubate/passes/ir.py index cf6568a545f39..793588c3d4c31 100644 --- a/python/paddle/incubate/passes/ir.py +++ b/python/paddle/incubate/passes/ir.py @@ -34,7 +34,7 @@ class RegisterPassHelper: _register_helpers = list() - def __init__(self, pass_pairs, pass_type=str(), input_specs=dict()): + def __init__(self, pass_pairs, pass_type='', input_specs=dict()): self._pass_type = pass_type self._pass_pairs = pass_pairs self._input_specs = input_specs diff --git a/python/paddle/static/io.py b/python/paddle/static/io.py index 45143b4ad956a..86c796f735cfb 100644 --- a/python/paddle/static/io.py +++ b/python/paddle/static/io.py @@ -994,7 +994,7 @@ def name_has_fc(var): for name in sorted(save_var_map.keys()): save_var_list.append(save_var_map[name]) - save_path = str() + save_path = '' if save_to_memory is False: save_path = os.path.join(os.path.normpath(dirname), filename)
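
For context on the mechanical rewrite above: ruff's UP018 rule flags `str()` calls whose argument is already a string literal (or calls with no argument at all), because they return an equivalent string and add nothing. The standalone sketch below is illustrative only and is not part of the patch; it shows the before/after pattern applied throughout the files touched here, using the `FLAGS_check_nan_inf` environment variable and the `save_path = str()` idiom that appear in the diff.

```python
# Minimal sketch of the UP018 rewrite (illustrative, not part of the patch).
import os

# Redundant form, as it appeared before this patch:
os.environ[str("FLAGS_check_nan_inf")] = str("1")

# Simplified form, as rewritten by the patch:
os.environ["FLAGS_check_nan_inf"] = "1"

assert str("1") == "1"  # wrapping a string literal in str() is a no-op
assert str() == ""      # str() with no arguments is just the empty string

save_path = str()       # old style, seen in python/paddle/static/io.py
save_path = ""          # equivalent, and what the patch uses
```

The same reasoning covers the `str('/') + ParallelMode.DataParallel` occurrences in the auto-parallel passes: concatenating with the bare literal `'/'` produces the identical op namescope string, so only the redundant `str()` wrapper is dropped and behavior is unchanged.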