[CodeStyle][B009][B010] use normal property access instead of getattr/setattr (#51530)
SigureMo authored Mar 16, 2023
1 parent d1e2c61 commit 2f2b1f2
Showing 19 changed files with 99 additions and 100 deletions.
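B009 and B010 are flake8-bugbear rules (also implemented by ruff) that flag getattr/setattr calls whose attribute name is a plain string literal, since those are just a slower, less readable spelling of ordinary attribute access. A minimal sketch of the rewrite this commit applies throughout, using hypothetical names rather than Paddle code:

class Config:
    pass

cfg = Config()

# B010: setattr with a literal attribute name -> plain assignment
# setattr(cfg, "batch_size", 32)     # flagged
cfg.batch_size = 32                   # preferred

# B009: getattr with a literal attribute name -> plain attribute access
# size = getattr(cfg, "batch_size")  # flagged
size = cfg.batch_size                 # preferred

# Left alone: the attribute name is only known at runtime, so
# getattr/setattr remain the right tool (the rules only target literals).
name = "learning_rate"
setattr(cfg, name, 1e-3)
lr = getattr(cfg, name, None)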
4 changes: 4 additions & 0 deletions pyproject.toml
@@ -54,6 +54,10 @@ select = [

# NumPy-specific rules
"NPY001",
+
+ # Bugbear
+ "B009",
+ "B010",
]
unfixable = [
"NPY001"
5 changes: 1 addition & 4 deletions python/paddle/dataset/common.py
@@ -138,10 +138,7 @@ def fetch_all():
if "fetch" in dir(
importlib.import_module("paddle.dataset.%s" % module_name)
):
- getattr(
-     importlib.import_module("paddle.dataset.%s" % module_name),
-     "fetch",
- )()
+ importlib.import_module('paddle.dataset.%s' % module_name).fetch()


def split(reader, line_count, suffix="%05d.pickle", dumper=pickle.dump):
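The fetch_all() hunk above keeps the dynamic part (the module name is built at runtime) and only drops the constant-name getattr around "fetch". A standalone sketch of the same shape, using the standard library instead of paddle.dataset:

import importlib

# The module name may be computed at runtime, so import_module stays;
# "sqrt" is a literal attribute name, so plain access replaces
# getattr(module, "sqrt").
module_name = "math"
math_mod = importlib.import_module(module_name)
print(math_mod.sqrt(16.0))  # 4.0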
2 changes: 1 addition & 1 deletion python/paddle/distributed/fleet/fleet.py
@@ -1282,7 +1282,7 @@ def _minimize_impl(
self.origin_main_program = loss.block.program
# add distributed attr
if not hasattr(self.origin_main_program, "distributed_info_"):
- setattr(self.origin_main_program, "distributed_info_", dict())
+ self.origin_main_program.distributed_info_ = dict()
self.origin_main_program.distributed_info_[
"dp_degree"
] = self._user_defined_strategy.sharding_configs["dp_degree"]
8 changes: 4 additions & 4 deletions python/paddle/distributed/fleet/layers/mpu/mp_layers.py
@@ -143,7 +143,7 @@ def __init__(

self.weight.is_distributed = True if self.is_mp else False
if self.weight.is_distributed:
- setattr(self.weight, "split_axis", 0)
+ self.weight.split_axis = 0

def forward(self, x):
if self.is_mp:
@@ -277,7 +277,7 @@ def __init__(
self.weight.is_distributed = True if self.is_mp else False

if self.weight.is_distributed:
- setattr(self.weight, "split_axis", 1)
+ self.weight.split_axis = 1

if has_bias:
# initialize bias to zero like Megatron
@@ -289,7 +289,7 @@
)
self.bias.is_distributed = True if self.is_mp else False
if self.bias.is_distributed:
- setattr(self.bias, "split_axis", 0)
+ self.bias.split_axis = 0
else:
self.bias = None

@@ -443,7 +443,7 @@ def __init__(

self.weight.is_distributed = True if self.is_mp else False
if self.weight.is_distributed:
- setattr(self.weight, "split_axis", 0)
+ self.weight.split_axis = 0

if has_bias:
self.bias = self.create_parameter(
@@ -493,7 +493,7 @@ def _synchronize_shared_weights(self):

for param in comm['layer'].parameters():
if self.global_rank != min(comm['ranks']):
- setattr(param, 'is_firstly_shared', False)
+ param.is_firstly_shared = False

def allreduce_shared_weight_gradients(self):
for key, comm in self.shared_comm.items():
@@ -641,7 +641,7 @@ def _build_layer_impl(self, start, end):
for param in self.shared_layers[
layer.layer_name
].parameters():
- setattr(param, "is_firstly_shared", True)
+ param.is_firstly_shared = True

if layer.forward_func is None:
run_function.append(self.shared_layers[layer.layer_name])
@@ -1047,18 +1047,18 @@ def _create_params_grad(trainable_params, param2buffer_size, task_flow):

def _PartitionParam(param):
if not hasattr(param, "fw_storage"):
- setattr(param, "fw_storage", None)
- setattr(param, "bw_storage", None)
- setattr(param, "master_weight", None)
- setattr(param, "status", "all")
- setattr(param, "use_count", 0)
+ param.fw_storage = None
+ param.bw_storage = None
+ param.master_weight = None
+ param.status = "all"
+ param.use_count = 0
return param


def _UnsliceParam(param):
if not hasattr(param, "unslice"):
- setattr(param, "unslice", True)
- setattr(param, "master_weight", None)
+ param.unslice = True
+ param.master_weight = None
return param


@@ -1078,11 +1078,11 @@ def _VarBaseWrapper(param):

def _OptimizerWrapper(optimizer, offload, group, update_params_slice):
if not hasattr(optimizer, "_optim"):
- setattr(optimizer, "_optim", optimizer)
- setattr(optimizer, "offload", offload)
- setattr(optimizer, "_group", group)
- setattr(optimizer, "update_scaler", None)
- setattr(optimizer, "update_slice", update_params_slice)
+ optimizer._optim = optimizer
+ optimizer.offload = offload
+ optimizer._group = group
+ optimizer.update_scaler = None
+ optimizer.update_slice = update_params_slice
return optimizer


@@ -67,7 +67,7 @@ def tearDownClass(cls):

def custom_raw_relu(self, x):
module = importlib.import_module(MODULE_NAME)
- custom_raw_relu_op = getattr(module, "custom_raw_relu")
+ custom_raw_relu_op = module.custom_raw_relu
self.assertIsNotNone(custom_raw_relu_op)
return custom_raw_relu_op(x)

@@ -31,7 +31,7 @@ def forward(self):


net = ForwardNotExist()
- setattr(net, "forward", "A string so that convert forward will fail")
+ net.forward = "A string so that convert forward will fail"


class TestConvertCall(unittest.TestCase):
13 changes: 7 additions & 6 deletions python/paddle/fluid/tests/unittests/eager_op_test.py
@@ -449,7 +449,7 @@ def is_bfloat16_op(self):
)
or (
hasattr(self, 'mkldnn_data_type')
- and getattr(self, 'mkldnn_data_type') == "bfloat16"
+ and self.mkldnn_data_type == "bfloat16"
)
or (
hasattr(self, 'attrs')
@@ -469,7 +469,7 @@ def is_float16_op(self):
)
or (
hasattr(self, 'mkldnn_data_type')
- and getattr(self, 'mkldnn_data_type') == "float16"
+ and self.mkldnn_data_type == "float16"
)
or (
hasattr(self, 'attrs')
@@ -1713,7 +1713,7 @@ def _is_skip_name(self, name):
prim_checker = PrimForwardChecker(self, place)
prim_checker.check()
# Support operators which are not in the NO_FP64_CHECK_GRAD_OP_LIST list can be test prim with fp32
- setattr(self.__class__, 'check_prim', True)
+ self.__class__.check_prim = True
self.__class__.op_type = self.op_type
# set some flags by the combination of arguments.
self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)
@@ -1728,8 +1728,9 @@ def _is_skip_name(self, name):
if self.is_mkldnn_op():
check_dygraph = False

- if hasattr(self, 'force_fp32_output') and getattr(
-     self, 'force_fp32_output'
+ if (
+     hasattr(self, 'force_fp32_output')
+     and self.force_fp32_output
):
atol = 1e-2 if atol < 1e-2 else atol
else:
@@ -2078,7 +2079,7 @@ def check_grad_with_place(
)
prim_grad_checker.check()
# Support operators which are not in the NO_FP64_CHECK_GRAD_OP_LIST list can be test prim with fp32
- setattr(self.__class__, 'check_prim', True)
+ self.__class__.check_prim = True
self._check_grad_helper()
if only_check_prim:
return
13 changes: 7 additions & 6 deletions python/paddle/fluid/tests/unittests/op_test.py
@@ -451,7 +451,7 @@ def is_bfloat16_op(self):
)
or (
hasattr(self, 'mkldnn_data_type')
- and getattr(self, 'mkldnn_data_type') == "bfloat16"
+ and self.mkldnn_data_type == "bfloat16"
)
or (
hasattr(self, 'attrs')
@@ -471,7 +471,7 @@ def is_float16_op(self):
)
or (
hasattr(self, 'mkldnn_data_type')
- and getattr(self, 'mkldnn_data_type') == "float16"
+ and self.mkldnn_data_type == "float16"
)
or (
hasattr(self, 'attrs')
@@ -1502,7 +1502,7 @@ def check_output_with_place(
prim_checker = PrimForwardChecker(self, place)
prim_checker.check()
# Support operators which not in the NO_FP64_CHECK_GRAD_OP_LIST list can be test prim with fp32
- setattr(self.__class__, 'check_prim', True)
+ self.__class__.check_prim = True
self.__class__.op_type = self.op_type
# disable legacy dygraph check when check_eager is True
if check_eager:
@@ -1907,8 +1907,9 @@ def _is_skip_name(self, name):
if self.is_mkldnn_op():
check_dygraph = False
check_eager = False
- if hasattr(self, 'force_fp32_output') and getattr(
-     self, 'force_fp32_output'
+ if (
+     hasattr(self, 'force_fp32_output')
+     and self.force_fp32_output
):
atol = 1e-2 if atol < 1e-2 else atol
else:
@@ -2288,7 +2289,7 @@ def check_grad_with_place(
)
prim_grad_checker.check()
# Support operators which not in the NO_FP64_CHECK_GRAD_OP_LIST list can be test prim with fp32
- setattr(self.__class__, 'check_prim', True)
+ self.__class__.check_prim = True
self._check_grad_helper()
if only_check_prim:
return
4 changes: 2 additions & 2 deletions python/paddle/hapi/model_summary.py
@@ -312,8 +312,8 @@ def hook(layer, input, output):
params += np.prod(v.shape)

try:
- if (getattr(getattr(layer, k), 'trainable')) and (
-     not getattr(getattr(layer, k), 'stop_gradient')
+ if (getattr(layer, k).trainable) and (
+     not getattr(layer, k).stop_gradient
):
summary[m_key]["trainable_params"] += np.prod(v.shape)
summary[m_key]["trainable"] = True
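Note that this hunk keeps the outer getattr(layer, k), since k is a loop variable and B009 only targets literal attribute names; only the inner, constant-named lookups become plain access. A self-contained sketch of that split, with hypothetical classes rather than the hapi code:

class Param:
    def __init__(self, trainable, stop_gradient):
        self.trainable = trainable
        self.stop_gradient = stop_gradient

class Layer:
    def __init__(self):
        self.weight = Param(trainable=True, stop_gradient=False)
        self.bias = Param(trainable=False, stop_gradient=True)

layer = Layer()
for k in ("weight", "bias"):
    p = getattr(layer, k)  # dynamic name: getattr stays
    # literal names: direct access replaces getattr(p, 'trainable') etc.
    if p.trainable and not p.stop_gradient:
        print(f"{k} counts toward trainable_params")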
4 changes: 2 additions & 2 deletions python/paddle/incubate/distributed/utils/io/save_for_auto.py
@@ -219,7 +219,7 @@ def _get_dims_mapping(dist_parameter, mp_group):

dist_shape = np.array(dist_parameter.shape)
if hasattr(dist_parameter, "split_axis"):
- aixs = getattr(dist_parameter, "split_axis")
+ aixs = dist_parameter.split_axis
mapping = [-1 for _ in dist_shape]
mapping[aixs] = 1
logger.debug(
@@ -351,7 +351,7 @@ def _get_wrapped_dist_state_dict(dist_state_dict):
logger.debug(f"not first used : {v.name}")
continue
wrapped_state_dict[name_mapping[v.name]] = v
- setattr(v, "dims_mapping", _get_dims_mapping(v, mp_group))
+ v.dims_mapping = _get_dims_mapping(v, mp_group)
logger.debug(
f"saving param: {v.name} -> {name_mapping[v.name]} shape: {v.shape}"
)
2 changes: 1 addition & 1 deletion python/paddle/jit/dy2static/convert_call_func.py
@@ -312,7 +312,7 @@ def dyfunc(x):
# Bound mothod will be convert into plain function after `convert_to_static`.
# So descriptor mechanism is used to bound `self` instance on function to
# keep it as bound method.
- setattr(func, 'forward', forward_func.__get__(func))
+ func.forward = forward_func.__get__(func)
except (IOError, OSError, TypeError):
# NOTE: func.forward may have been decorated.
func_self = None if func_self else func_self
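The hunk above still relies on the descriptor protocol (forward_func.__get__(func)) to attach a bound method to an instance; only the setattr wrapper around the assignment goes away. A standalone sketch of that binding mechanism, with hypothetical names:

class Net:
    pass

def forward(self, x):
    # A plain function defined outside the class.
    return x * 2

net = Net()
# Functions are descriptors: __get__(net) returns a method bound to net,
# so net.forward(3) works without passing net explicitly.
net.forward = forward.__get__(net)
print(net.forward(3))  # 6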
6 changes: 3 additions & 3 deletions python/paddle/jit/dy2static/program_translator.py
@@ -314,8 +314,8 @@ def __init__(self, function, input_spec=None, **kwargs):
# save the instance `self` while decorating a method of class.

if inspect.ismethod(function):
- self._dygraph_function = getattr(function, '__func__')
- self._class_instance = getattr(function, '__self__')
+ self._dygraph_function = function.__func__
+ self._class_instance = function.__self__

if not hasattr(self._class_instance, '_original_funcs'):
raise TypeError(
@@ -885,7 +885,7 @@ def __init__(self, func, class_instance, with_hook=False):
self.need_apply_hook = (
with_hook
and isinstance(self.class_instance, layers.Layer)
- and getattr(func, "__name__") == "forward"
+ and func.__name__ == "forward"
)

def apply_pre_hooks(self, inputs):
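The StaticFunction hunk unpacks a bound method into its underlying function and instance. __func__ and __self__ are standard attributes of bound methods; a quick illustration with a throwaway class:

class Model:
    def forward(self, x):
        return x + 1

m = Model()
bound = m.forward
# __func__ is the plain function stored on the class,
# __self__ is the instance the method is bound to.
assert bound.__func__ is Model.forward
assert bound.__self__ is m
print(bound(41))  # 42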
12 changes: 4 additions & 8 deletions python/paddle/jit/dy2static/utils.py
@@ -576,7 +576,7 @@ def func_prefix(func):
# The 'forward' or 'another_forward' of 'TranslatedLayer' cannot be obtained
# through 'func_name'. So set the special function name '__i_m_p_l__'.
if hasattr(module, '__i_m_p_l__'):
- callable_func = getattr(module, '__i_m_p_l__')
+ callable_func = module.__i_m_p_l__
callable_func.__name__ = func_name
elif hasattr(module, func_name):
callable_func = getattr(module, func_name)
@@ -1120,11 +1120,11 @@ def __init__(self, root_node):

def _reset_name_scope(self, node):
# always reset the node as empty namescope.
- setattr(node, "pd_scope", NameScope())
+ node.pd_scope = NameScope()

def _get_name_scope(self, node):
if not hasattr(node, "pd_scope"):
- setattr(node, "pd_scope", NameScope())
+ node.pd_scope = NameScope()
return node.pd_scope

def _current_name_scope(self):
@@ -1224,11 +1224,7 @@ def post_func():
)

def pre_func():
- setattr(
-     node,
-     "before_created",
-     self._nearest_function_scope().existed_vars(),
- )
+ node.before_created = self._nearest_function_scope().existed_vars()

self._visit_scope_node(node, pre_func, post_func)

2 changes: 1 addition & 1 deletion python/paddle/nn/functional/vision.py
@@ -320,7 +320,7 @@ def grid_sample(
'use_cudnn',
use_cudnn,
)
- out = getattr(_legacy_C_ops, 'grid_sampler')(x, grid, *attrs)
+ out = _legacy_C_ops.grid_sampler(x, grid, *attrs)
else:
helper = LayerHelper("grid_sample", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'grid_sample')
20 changes: 10 additions & 10 deletions python/paddle/nn/quant/qat/conv.py
@@ -30,18 +30,18 @@ def __init__(self, layer: Layer, q_config):
super(QuantedConv2D, self).__init__()

# For Conv2D
- self._groups = getattr(layer, '_groups')
- self._stride = getattr(layer, '_stride')
- self._padding = getattr(layer, '_padding')
- self._padding_mode = getattr(layer, '_padding_mode')
+ self._groups = layer._groups
+ self._stride = layer._stride
+ self._padding = layer._padding
+ self._padding_mode = layer._padding_mode
if self._padding_mode != 'zeros':
- self._reversed_padding_repeated_twice = getattr(
-     layer, '_reversed_padding_repeated_twice'
+ self._reversed_padding_repeated_twice = (
+     layer._reversed_padding_repeated_twice
)
- self._dilation = getattr(layer, '_dilation')
- self._data_format = getattr(layer, '_data_format')
- self.weight = getattr(layer, 'weight')
- self.bias = getattr(layer, 'bias')
+ self._dilation = layer._dilation
+ self._data_format = layer._data_format
+ self.weight = layer.weight
+ self.bias = layer.bias

self.weight_quanter = None
self.activation_quanter = None
6 changes: 3 additions & 3 deletions python/paddle/nn/quant/qat/linear.py
@@ -28,9 +28,9 @@ class QuantedLinear(ConvertibleQuantedLayer):
def __init__(self, layer: Layer, q_config):
super(QuantedLinear, self).__init__()
# For Linear
- self.weight = getattr(layer, 'weight')
- self.bias = getattr(layer, 'bias')
- self.name = getattr(layer, 'name')
+ self.weight = layer.weight
+ self.bias = layer.bias
+ self.name = layer.name
# For FakeQuant

self.weight_quanter = None