diff --git a/pyproject.toml b/pyproject.toml
index f244f6c1d45e4..da0f272d4602b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -54,6 +54,10 @@ select = [

     # NumPy-specific rules
     "NPY001",
+
+    # Bugbear
+    "B009",
+    "B010",
 ]
 unfixable = [
     "NPY001"
diff --git a/python/paddle/dataset/common.py b/python/paddle/dataset/common.py
index eab4d37676190..fb8c4ba969164 100644
--- a/python/paddle/dataset/common.py
+++ b/python/paddle/dataset/common.py
@@ -138,10 +138,7 @@ def fetch_all():
         if "fetch" in dir(
             importlib.import_module("paddle.dataset.%s" % module_name)
         ):
-            getattr(
-                importlib.import_module("paddle.dataset.%s" % module_name),
-                "fetch",
-            )()
+            importlib.import_module('paddle.dataset.%s' % module_name).fetch()


 def split(reader, line_count, suffix="%05d.pickle", dumper=pickle.dump):
diff --git a/python/paddle/distributed/fleet/fleet.py b/python/paddle/distributed/fleet/fleet.py
index 96a964b6999cd..55650a5b5f6af 100755
--- a/python/paddle/distributed/fleet/fleet.py
+++ b/python/paddle/distributed/fleet/fleet.py
@@ -1282,7 +1282,7 @@ def _minimize_impl(
         self.origin_main_program = loss.block.program
         # add distributed attr
         if not hasattr(self.origin_main_program, "distributed_info_"):
-            setattr(self.origin_main_program, "distributed_info_", dict())
+            self.origin_main_program.distributed_info_ = dict()
         self.origin_main_program.distributed_info_[
             "dp_degree"
         ] = self._user_defined_strategy.sharding_configs["dp_degree"]
diff --git a/python/paddle/distributed/fleet/layers/mpu/mp_layers.py b/python/paddle/distributed/fleet/layers/mpu/mp_layers.py
index 794acde2ec88a..c8914256021b8 100644
--- a/python/paddle/distributed/fleet/layers/mpu/mp_layers.py
+++ b/python/paddle/distributed/fleet/layers/mpu/mp_layers.py
@@ -143,7 +143,7 @@ def __init__(
         self.weight.is_distributed = True if self.is_mp else False

         if self.weight.is_distributed:
-            setattr(self.weight, "split_axis", 0)
+            self.weight.split_axis = 0

     def forward(self, x):
         if self.is_mp:
@@ -277,7 +277,7 @@ def __init__(
         self.weight.is_distributed = True if self.is_mp else False

         if self.weight.is_distributed:
-            setattr(self.weight, "split_axis", 1)
+            self.weight.split_axis = 1

         if has_bias:
             # initialize bias to zero like Megatron
@@ -289,7 +289,7 @@ def __init__(
             )
             self.bias.is_distributed = True if self.is_mp else False
             if self.bias.is_distributed:
-                setattr(self.bias, "split_axis", 0)
+                self.bias.split_axis = 0
         else:
             self.bias = None

@@ -443,7 +443,7 @@ def __init__(
         self.weight.is_distributed = True if self.is_mp else False

         if self.weight.is_distributed:
-            setattr(self.weight, "split_axis", 0)
+            self.weight.split_axis = 0

         if has_bias:
             self.bias = self.create_parameter(
diff --git a/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py b/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py
index 3b164d2afa1cb..3c1d615ed63f1 100755
--- a/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py
+++ b/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py
@@ -493,7 +493,7 @@ def _synchronize_shared_weights(self):

             for param in comm['layer'].parameters():
                 if self.global_rank != min(comm['ranks']):
-                    setattr(param, 'is_firstly_shared', False)
+                    param.is_firstly_shared = False

     def allreduce_shared_weight_gradients(self):
         for key, comm in self.shared_comm.items():
@@ -641,7 +641,7 @@ def _build_layer_impl(self, start, end):
                    for param in self.shared_layers[
                        layer.layer_name
                    ].parameters():
-                        setattr(param, "is_firstly_shared", True)
+                        param.is_firstly_shared = True

                 if layer.forward_func is None:
                     run_function.append(self.shared_layers[layer.layer_name])
diff --git a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py
index 5fb2e9a58d501..9e440f3fe671b 100644
--- a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py
+++ b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py
@@ -1047,18 +1047,18 @@ def _create_params_grad(trainable_params, param2buffer_size, task_flow):

 def _PartitionParam(param):
     if not hasattr(param, "fw_storage"):
-        setattr(param, "fw_storage", None)
-        setattr(param, "bw_storage", None)
-        setattr(param, "master_weight", None)
-        setattr(param, "status", "all")
-        setattr(param, "use_count", 0)
+        param.fw_storage = None
+        param.bw_storage = None
+        param.master_weight = None
+        param.status = "all"
+        param.use_count = 0
     return param


 def _UnsliceParam(param):
     if not hasattr(param, "unslice"):
-        setattr(param, "unslice", True)
-        setattr(param, "master_weight", None)
+        param.unslice = True
+        param.master_weight = None
     return param


@@ -1078,11 +1078,11 @@ def _VarBaseWrapper(param):

 def _OptimizerWrapper(optimizer, offload, group, update_params_slice):
     if not hasattr(optimizer, "_optim"):
-        setattr(optimizer, "_optim", optimizer)
-        setattr(optimizer, "offload", offload)
-        setattr(optimizer, "_group", group)
-        setattr(optimizer, "update_scaler", None)
-        setattr(optimizer, "update_slice", update_params_slice)
+        optimizer._optim = optimizer
+        optimizer.offload = offload
+        optimizer._group = group
+        optimizer.update_scaler = None
+        optimizer.update_slice = update_params_slice
     return optimizer


diff --git a/python/paddle/fluid/tests/custom_op/test_custom_raw_op_kernel_op.py b/python/paddle/fluid/tests/custom_op/test_custom_raw_op_kernel_op.py
index 3fcf94e2f1fda..9762d29c48c83 100644
--- a/python/paddle/fluid/tests/custom_op/test_custom_raw_op_kernel_op.py
+++ b/python/paddle/fluid/tests/custom_op/test_custom_raw_op_kernel_op.py
@@ -67,7 +67,7 @@ def tearDownClass(cls):

     def custom_raw_relu(self, x):
         module = importlib.import_module(MODULE_NAME)
-        custom_raw_relu_op = getattr(module, "custom_raw_relu")
+        custom_raw_relu_op = module.custom_raw_relu
         self.assertIsNotNone(custom_raw_relu_op)
         return custom_raw_relu_op(x)

diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_operators.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_operators.py
index 812abb18ff356..895203946018f 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_operators.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_operators.py
@@ -31,7 +31,7 @@ def forward(self):


 net = ForwardNotExist()
-setattr(net, "forward", "A string so that convert forward will fail")
+net.forward = "A string so that convert forward will fail"


 class TestConvertCall(unittest.TestCase):
diff --git a/python/paddle/fluid/tests/unittests/eager_op_test.py b/python/paddle/fluid/tests/unittests/eager_op_test.py
index 9615a9c81531a..3aeb0bc87ce4e 100644
--- a/python/paddle/fluid/tests/unittests/eager_op_test.py
+++ b/python/paddle/fluid/tests/unittests/eager_op_test.py
@@ -449,7 +449,7 @@ def is_bfloat16_op(self):
             )
             or (
                 hasattr(self, 'mkldnn_data_type')
-                and getattr(self, 'mkldnn_data_type') == "bfloat16"
+                and self.mkldnn_data_type == "bfloat16"
             )
             or (
                 hasattr(self, 'attrs')
@@ -469,7 +469,7 @@ def is_float16_op(self):
             )
             or (
                 hasattr(self, 'mkldnn_data_type')
-                and getattr(self, 'mkldnn_data_type') == "float16"
+                and self.mkldnn_data_type == "float16"
             )
             or (
                 hasattr(self, 'attrs')
@@ -1713,7 +1713,7 @@ def _is_skip_name(self, name):
             prim_checker = PrimForwardChecker(self, place)
             prim_checker.check()
             # Support operators which are not in the NO_FP64_CHECK_GRAD_OP_LIST list can be test prim with fp32
-            setattr(self.__class__, 'check_prim', True)
+            self.__class__.check_prim = True
             self.__class__.op_type = self.op_type
         # set some flags by the combination of arguments.
         self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)
@@ -1728,8 +1728,9 @@ def _is_skip_name(self, name):

             if self.is_mkldnn_op():
                 check_dygraph = False
-                if hasattr(self, 'force_fp32_output') and getattr(
-                    self, 'force_fp32_output'
+                if (
+                    hasattr(self, 'force_fp32_output')
+                    and self.force_fp32_output
                 ):
                     atol = 1e-2 if atol < 1e-2 else atol
             else:
@@ -2078,7 +2079,7 @@ def check_grad_with_place(
             )
             prim_grad_checker.check()
             # Support operators which are not in the NO_FP64_CHECK_GRAD_OP_LIST list can be test prim with fp32
-            setattr(self.__class__, 'check_prim', True)
+            self.__class__.check_prim = True
             self._check_grad_helper()
             if only_check_prim:
                 return
diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index 74e35abe5d29c..cdb521fe258f6 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -451,7 +451,7 @@ def is_bfloat16_op(self):
             )
             or (
                 hasattr(self, 'mkldnn_data_type')
-                and getattr(self, 'mkldnn_data_type') == "bfloat16"
+                and self.mkldnn_data_type == "bfloat16"
             )
             or (
                 hasattr(self, 'attrs')
@@ -471,7 +471,7 @@ def is_float16_op(self):
             )
             or (
                 hasattr(self, 'mkldnn_data_type')
-                and getattr(self, 'mkldnn_data_type') == "float16"
+                and self.mkldnn_data_type == "float16"
             )
             or (
                 hasattr(self, 'attrs')
@@ -1502,7 +1502,7 @@ def check_output_with_place(
             prim_checker = PrimForwardChecker(self, place)
             prim_checker.check()
             # Support operators which not in the NO_FP64_CHECK_GRAD_OP_LIST list can be test prim with fp32
-            setattr(self.__class__, 'check_prim', True)
+            self.__class__.check_prim = True
             self.__class__.op_type = self.op_type
         # disable legacy dygraph check when check_eager is True
         if check_eager:
@@ -1907,8 +1907,9 @@ def _is_skip_name(self, name):
             if self.is_mkldnn_op():
                 check_dygraph = False
                 check_eager = False
-                if hasattr(self, 'force_fp32_output') and getattr(
-                    self, 'force_fp32_output'
+                if (
+                    hasattr(self, 'force_fp32_output')
+                    and self.force_fp32_output
                 ):
                     atol = 1e-2 if atol < 1e-2 else atol
             else:
@@ -2288,7 +2289,7 @@ def check_grad_with_place(
             )
             prim_grad_checker.check()
             # Support operators which not in the NO_FP64_CHECK_GRAD_OP_LIST list can be test prim with fp32
-            setattr(self.__class__, 'check_prim', True)
+            self.__class__.check_prim = True
             self._check_grad_helper()
             if only_check_prim:
                 return
diff --git a/python/paddle/hapi/model_summary.py b/python/paddle/hapi/model_summary.py
index d6234eacd15af..c36b9552be4aa 100644
--- a/python/paddle/hapi/model_summary.py
+++ b/python/paddle/hapi/model_summary.py
@@ -312,8 +312,8 @@ def hook(layer, input, output):
                 params += np.prod(v.shape)

                 try:
-                    if (getattr(getattr(layer, k), 'trainable')) and (
-                        not getattr(getattr(layer, k), 'stop_gradient')
+                    if (getattr(layer, k).trainable) and (
+                        not getattr(layer, k).stop_gradient
                     ):
                         summary[m_key]["trainable_params"] += np.prod(v.shape)
                         summary[m_key]["trainable"] = True
diff --git a/python/paddle/incubate/distributed/utils/io/save_for_auto.py b/python/paddle/incubate/distributed/utils/io/save_for_auto.py
index 3008201d2fd11..701c22e50fc76 100644
--- a/python/paddle/incubate/distributed/utils/io/save_for_auto.py
+++ b/python/paddle/incubate/distributed/utils/io/save_for_auto.py
@@ -219,7 +219,7 @@ def _get_dims_mapping(dist_parameter, mp_group):
     dist_shape = np.array(dist_parameter.shape)

     if hasattr(dist_parameter, "split_axis"):
-        aixs = getattr(dist_parameter, "split_axis")
+        aixs = dist_parameter.split_axis
         mapping = [-1 for _ in dist_shape]
         mapping[aixs] = 1
         logger.debug(
@@ -351,7 +351,7 @@ def _get_wrapped_dist_state_dict(dist_state_dict):
             logger.debug(f"not first used : {v.name}")
             continue
         wrapped_state_dict[name_mapping[v.name]] = v
-        setattr(v, "dims_mapping", _get_dims_mapping(v, mp_group))
+        v.dims_mapping = _get_dims_mapping(v, mp_group)
         logger.debug(
             f"saving param: {v.name} -> {name_mapping[v.name]} shape: {v.shape}"
         )
diff --git a/python/paddle/jit/dy2static/convert_call_func.py b/python/paddle/jit/dy2static/convert_call_func.py
index acbbe0e5d74f0..b5ce104d4dfe0 100644
--- a/python/paddle/jit/dy2static/convert_call_func.py
+++ b/python/paddle/jit/dy2static/convert_call_func.py
@@ -312,7 +312,7 @@ def dyfunc(x):
                 # Bound mothod will be convert into plain function after `convert_to_static`.
                 # So descriptor mechanism is used to bound `self` instance on function to
                 # keep it as bound method.
-                setattr(func, 'forward', forward_func.__get__(func))
+                func.forward = forward_func.__get__(func)
             except (IOError, OSError, TypeError):
                 # NOTE: func.forward may have been decorated.
                 func_self = None if func_self else func_self
diff --git a/python/paddle/jit/dy2static/program_translator.py b/python/paddle/jit/dy2static/program_translator.py
index 5b3eae6f8e3b6..9fbf2b6103538 100644
--- a/python/paddle/jit/dy2static/program_translator.py
+++ b/python/paddle/jit/dy2static/program_translator.py
@@ -314,8 +314,8 @@ def __init__(self, function, input_spec=None, **kwargs):

         # save the instance `self` while decorating a method of class.
         if inspect.ismethod(function):
-            self._dygraph_function = getattr(function, '__func__')
-            self._class_instance = getattr(function, '__self__')
+            self._dygraph_function = function.__func__
+            self._class_instance = function.__self__

             if not hasattr(self._class_instance, '_original_funcs'):
                 raise TypeError(
@@ -885,7 +885,7 @@ def __init__(self, func, class_instance, with_hook=False):
         self.need_apply_hook = (
             with_hook
             and isinstance(self.class_instance, layers.Layer)
-            and getattr(func, "__name__") == "forward"
+            and func.__name__ == "forward"
         )

     def apply_pre_hooks(self, inputs):
diff --git a/python/paddle/jit/dy2static/utils.py b/python/paddle/jit/dy2static/utils.py
index 0752e32b3fb10..28c0a0cfd1645 100644
--- a/python/paddle/jit/dy2static/utils.py
+++ b/python/paddle/jit/dy2static/utils.py
@@ -576,7 +576,7 @@ def func_prefix(func):
     # The 'forward' or 'another_forward' of 'TranslatedLayer' cannot be obtained
     # through 'func_name'. So set the special function name '__i_m_p_l__'.
     if hasattr(module, '__i_m_p_l__'):
-        callable_func = getattr(module, '__i_m_p_l__')
+        callable_func = module.__i_m_p_l__
         callable_func.__name__ = func_name
     elif hasattr(module, func_name):
         callable_func = getattr(module, func_name)
@@ -1120,11 +1120,11 @@ def __init__(self, root_node):

     def _reset_name_scope(self, node):
         # always reset the node as empty namescope.
-        setattr(node, "pd_scope", NameScope())
+        node.pd_scope = NameScope()

     def _get_name_scope(self, node):
         if not hasattr(node, "pd_scope"):
-            setattr(node, "pd_scope", NameScope())
+            node.pd_scope = NameScope()
         return node.pd_scope

     def _current_name_scope(self):
@@ -1224,11 +1224,7 @@ def post_func():
             )

         def pre_func():
-            setattr(
-                node,
-                "before_created",
-                self._nearest_function_scope().existed_vars(),
-            )
+            node.before_created = self._nearest_function_scope().existed_vars()

         self._visit_scope_node(node, pre_func, post_func)

diff --git a/python/paddle/nn/functional/vision.py b/python/paddle/nn/functional/vision.py
index 1acf1bcbc5f74..b7cebf4a58fbd 100644
--- a/python/paddle/nn/functional/vision.py
+++ b/python/paddle/nn/functional/vision.py
@@ -320,7 +320,7 @@ def grid_sample(
             'use_cudnn',
             use_cudnn,
         )
-        out = getattr(_legacy_C_ops, 'grid_sampler')(x, grid, *attrs)
+        out = _legacy_C_ops.grid_sampler(x, grid, *attrs)
     else:
         helper = LayerHelper("grid_sample", **locals())
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'grid_sample')
diff --git a/python/paddle/nn/quant/qat/conv.py b/python/paddle/nn/quant/qat/conv.py
index 4c8e6915c15ad..f2ffc7b103ad7 100644
--- a/python/paddle/nn/quant/qat/conv.py
+++ b/python/paddle/nn/quant/qat/conv.py
@@ -30,18 +30,18 @@ def __init__(self, layer: Layer, q_config):
         super(QuantedConv2D, self).__init__()

         # For Conv2D
-        self._groups = getattr(layer, '_groups')
-        self._stride = getattr(layer, '_stride')
-        self._padding = getattr(layer, '_padding')
-        self._padding_mode = getattr(layer, '_padding_mode')
+        self._groups = layer._groups
+        self._stride = layer._stride
+        self._padding = layer._padding
+        self._padding_mode = layer._padding_mode
         if self._padding_mode != 'zeros':
-            self._reversed_padding_repeated_twice = getattr(
-                layer, '_reversed_padding_repeated_twice'
+            self._reversed_padding_repeated_twice = (
+                layer._reversed_padding_repeated_twice
             )
-        self._dilation = getattr(layer, '_dilation')
-        self._data_format = getattr(layer, '_data_format')
-        self.weight = getattr(layer, 'weight')
-        self.bias = getattr(layer, 'bias')
+        self._dilation = layer._dilation
+        self._data_format = layer._data_format
+        self.weight = layer.weight
+        self.bias = layer.bias

         self.weight_quanter = None
         self.activation_quanter = None
diff --git a/python/paddle/nn/quant/qat/linear.py b/python/paddle/nn/quant/qat/linear.py
index b089486531a1a..c0e015ce51c86 100644
--- a/python/paddle/nn/quant/qat/linear.py
+++ b/python/paddle/nn/quant/qat/linear.py
@@ -28,9 +28,9 @@ class QuantedLinear(ConvertibleQuantedLayer):
     def __init__(self, layer: Layer, q_config):
         super(QuantedLinear, self).__init__()
         # For Linear
-        self.weight = getattr(layer, 'weight')
-        self.bias = getattr(layer, 'bias')
-        self.name = getattr(layer, 'name')
+        self.weight = layer.weight
+        self.bias = layer.bias
+        self.name = layer.name
         # For FakeQuant
         self.weight_quanter = None
diff --git a/python/paddle/nn/quant/quant_layers.py b/python/paddle/nn/quant/quant_layers.py
index 257009a8ff1fb..9e7b4c55ba49e 100644
--- a/python/paddle/nn/quant/quant_layers.py
+++ b/python/paddle/nn/quant/quant_layers.py
@@ -533,18 +533,18 @@ def __init__(
     ):
         super().__init__()
         # For Conv2D
-        self._groups = getattr(layer, '_groups')
-        self._stride = getattr(layer, '_stride')
-        self._padding = getattr(layer, '_padding')
-        self._padding_mode = getattr(layer, '_padding_mode')
+        self._groups = layer._groups
+        self._stride = layer._stride
+        self._padding = layer._padding
+        self._padding_mode = layer._padding_mode
         if self._padding_mode != 'zeros':
-            self._reversed_padding_repeated_twice = getattr(
-                layer, '_reversed_padding_repeated_twice'
+            self._reversed_padding_repeated_twice = (
+                layer._reversed_padding_repeated_twice
             )
-        self._dilation = getattr(layer, '_dilation')
-        self._data_format = getattr(layer, '_data_format')
-        self.weight = getattr(layer, 'weight')
-        self.bias = getattr(layer, 'bias')
+        self._dilation = layer._dilation
+        self._data_format = layer._data_format
+        self.weight = layer.weight
+        self.bias = layer.bias

         # For FakeQuant
         self._conv2d_quant_axis = 0
@@ -654,14 +654,14 @@ def __init__(
         """
         super().__init__()
         # For Conv2DTranspose
-        self._groups = getattr(layer, '_groups')
-        self._stride = getattr(layer, '_stride')
-        self._padding = getattr(layer, '_padding')
-        self._output_padding = getattr(layer, 'output_padding')
-        self._dilation = getattr(layer, '_dilation')
-        self._data_format = getattr(layer, '_data_format')
-        self.weight = getattr(layer, 'weight')
-        self.bias = getattr(layer, 'bias')
+        self._groups = layer._groups
+        self._stride = layer._stride
+        self._padding = layer._padding
+        self._output_padding = layer.output_padding
+        self._dilation = layer._dilation
+        self._data_format = layer._data_format
+        self.weight = layer.weight
+        self.bias = layer.bias
         # For FakeQuant
         self._conv2d_transpose_quant_axis = 1
         if weight_quant_layer is not None:
@@ -748,9 +748,9 @@ def __init__(
     ):
         super().__init__()
         # For Linear
-        self.weight = getattr(layer, 'weight')
-        self.bias = getattr(layer, 'bias')
-        self.name = getattr(layer, 'name')
+        self.weight = layer.weight
+        self.bias = layer.bias
+        self.name = layer.name
         # For FakeQuant
         self._linear_quant_axis = 1
@@ -829,15 +829,15 @@ def __init__(
             act_quant_layer is None
         ), "When quantizing ColumnParallelLinear, act_quant_layer should be None."

-        self.weight = getattr(layer, 'weight')
-        self.bias = getattr(layer, 'bias')
-        self.name = getattr(layer, '_name')
+        self.weight = layer.weight
+        self.bias = layer.bias
+        self.name = layer._name
         # For FakeQuant
         self._linear_quant_axis = 1

-        self.is_mp = getattr(layer, 'is_mp')
-        self.model_parallel_group = getattr(layer, 'model_parallel_group')
-        self.gather_output = getattr(layer, 'gather_output')
+        self.is_mp = layer.is_mp
+        self.model_parallel_group = layer.model_parallel_group
+        self.gather_output = layer.gather_output

         self._fake_quant_weight = _get_fake_quant_type(
             weight_quantize_type,
@@ -923,15 +923,15 @@ def __init__(
         ), "When quantizing RowParallelLinear, act_quant_layer cannot defined by yourself."

         # For Linear
-        self.weight = getattr(layer, 'weight')
-        self.bias = getattr(layer, 'bias')
-        self.name = getattr(layer, '_name')
+        self.weight = layer.weight
+        self.bias = layer.bias
+        self.name = layer._name
         # For FakeQuant
         self._linear_quant_axis = 1

-        self.input_is_parallel = getattr(layer, 'input_is_parallel')
-        self.is_mp = getattr(layer, 'is_mp')
-        self.model_parallel_group = getattr(layer, 'model_parallel_group')
+        self.input_is_parallel = layer.input_is_parallel
+        self.is_mp = layer.is_mp
+        self.model_parallel_group = layer.model_parallel_group

         self._fake_quant_weight = _get_fake_quant_type(
             weight_quantize_type,