diff --git a/python/paddle/audio/functional/functional.py b/python/paddle/audio/functional/functional.py
index fada4e177542d..60dc359ee9245 100644
--- a/python/paddle/audio/functional/functional.py
+++ b/python/paddle/audio/functional/functional.py
@@ -250,7 +250,7 @@ def compute_fbank_matrix(
     if norm == 'slaney':
         enorm = 2.0 / (mel_f[2 : n_mels + 2] - mel_f[:n_mels])
         weights *= enorm.unsqueeze(1)
-    elif isinstance(norm, int) or isinstance(norm, float):
+    elif isinstance(norm, (int, float)):
         weights = paddle.nn.functional.normalize(weights, p=norm, axis=-1)
 
     return weights
diff --git a/python/paddle/distributed/auto_parallel/dist_op.py b/python/paddle/distributed/auto_parallel/dist_op.py
index 7e64c7a56f03f..908fad25aaf98 100644
--- a/python/paddle/distributed/auto_parallel/dist_op.py
+++ b/python/paddle/distributed/auto_parallel/dist_op.py
@@ -323,7 +323,7 @@ def __call__(self, *args, **kwargs):
         output = self._serial_op(*args, **kwargs)
         new_op_size = len(cur_block.ops)
 
-        if isinstance(output, tuple) or isinstance(output, list):
+        if isinstance(output, (tuple, list)):
             new_output = list(output)
         elif isinstance(output, Variable):
             new_output = [output]
diff --git a/python/paddle/distributed/auto_parallel/reshard.py b/python/paddle/distributed/auto_parallel/reshard.py
index 6fc9b9d27eee2..a711b40e939ad 100644
--- a/python/paddle/distributed/auto_parallel/reshard.py
+++ b/python/paddle/distributed/auto_parallel/reshard.py
@@ -1948,9 +1948,7 @@ def parse_op_desc(
             )
             idx = idx_list[0]
 
-        elif isinstance(op_desc, SliceOpDesc) or isinstance(
-            op_desc, AllGatherConcatOpDesc
-        ):
+        elif isinstance(op_desc, (SliceOpDesc, AllGatherConcatOpDesc)):
             target_tensor = None
             if isinstance(op_desc, SliceOpDesc):
                 assert (
diff --git a/python/paddle/distributed/fleet/base/util_factory.py b/python/paddle/distributed/fleet/base/util_factory.py
index dceb425ceb69d..6fbefb4412e75 100755
--- a/python/paddle/distributed/fleet/base/util_factory.py
+++ b/python/paddle/distributed/fleet/base/util_factory.py
@@ -425,7 +425,7 @@ def _params_check(self, config):
         def feed_gen(batch_size, feeded_vars_dims, feeded_vars_filelist):
             def reader(batch_size, fn, dim):
                 data = []
-                if isinstance(dim, list) or isinstance(dim, tuple):
+                if isinstance(dim, (list, tuple)):
                     shape = list(dim)
                     _temp = 1
                     for x in dim:
diff --git a/python/paddle/distributed/fleet/meta_optimizers/localsgd_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/localsgd_optimizer.py
index 2973d4d3130f9..fc9cf107f8667 100644
--- a/python/paddle/distributed/fleet/meta_optimizers/localsgd_optimizer.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/localsgd_optimizer.py
@@ -45,11 +45,14 @@ def _can_apply(self):
         if self.role_maker._worker_num() <= 1:
             return False
 
-        return (
-            isinstance(self.inner_opt, paddle.optimizer.momentum.Momentum)
-            or isinstance(self.inner_opt, paddle.fluid.optimizer.Momentum)
-            or isinstance(self.inner_opt, paddle.optimizer.sgd.SGD)
-            or isinstance(self.inner_opt, paddle.fluid.optimizer.SGD)
+        return isinstance(
+            self.inner_opt,
+            (
+                paddle.optimizer.momentum.Momentum,
+                paddle.fluid.optimizer.Momentum,
+                paddle.optimizer.sgd.SGD,
+                paddle.fluid.optimizer.SGD,
+            ),
         )
 
     def _disable_strategy(self, dist_strategy):
@@ -228,11 +231,14 @@ def _can_apply(self):
         if self.role_maker._worker_num() <= 1:
             return False
 
-        return (
-            isinstance(self.inner_opt, paddle.optimizer.Momentum)
-            or isinstance(self.inner_opt, paddle.fluid.optimizer.Momentum)
-            or isinstance(self.inner_opt, paddle.optimizer.sgd.SGD)
-            or isinstance(self.inner_opt, paddle.fluid.optimizer.SGD)
+        return isinstance(
+            self.inner_opt,
+            (
+                paddle.optimizer.Momentum,
+                paddle.fluid.optimizer.Momentum,
+                paddle.optimizer.sgd.SGD,
+                paddle.fluid.optimizer.SGD,
+            ),
         )
 
     def _disable_strategy(self, dist_strategy):
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py b/python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py
index 623076ede7384..49ff0e5685bd4 100755
--- a/python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py
@@ -795,9 +795,7 @@ def random_to_skip():
                 if isinstance(threshold, float):
                     atol = threshold
                     rtol = 1e-8
-                elif isinstance(threshold, list) or isinstance(
-                    threshold, tuple
-                ):
+                elif isinstance(threshold, (list, tuple)):
                     atol = threshold[0]
                     rtol = threshold[1]
                 else:
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_bilinear_interp_v2_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_bilinear_interp_v2_mkldnn_op.py
index cf81403d20d70..c748edfbe8c0b 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_bilinear_interp_v2_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_bilinear_interp_v2_mkldnn_op.py
@@ -110,7 +110,7 @@ def setUp(self):
         scale_w = 0
 
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 scale_h = float(self.scale)
                 scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_nearest_interp_v2_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_nearest_interp_v2_mkldnn_op.py
index 9c54790ac2579..f68bb04d8b9e2 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_nearest_interp_v2_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_nearest_interp_v2_mkldnn_op.py
@@ -104,7 +104,7 @@ def setUp(self):
         scale_w = 0
 
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 scale_h = float(self.scale)
                 scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
diff --git a/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py
index 3e226cfdb262b..b0ea2d50b1a29 100644
--- a/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py
@@ -37,17 +37,15 @@ def bicubic_interp_test(
     align_corners=True,
     align_mode=0,
 ):
-    if isinstance(scale, float) or isinstance(scale, int):
+    if isinstance(scale, (float, int)):
         scale_list = []
         for _ in range(len(x.shape) - 2):
             scale_list.append(scale)
         scale = list(map(float, scale_list))
-    elif isinstance(scale, list) or isinstance(scale, tuple):
+    elif isinstance(scale, (list, tuple)):
         scale = list(map(float, scale))
     if SizeTensor is not None:
-        if not isinstance(SizeTensor, list) and not isinstance(
-            SizeTensor, tuple
-        ):
+        if not isinstance(SizeTensor, (list, tuple)):
             SizeTensor = [SizeTensor]
     return paddle._C_ops.bicubic_interp(
         x,
@@ -197,7 +195,7 @@ def setUp(self):
         in_w = self.input_shape[2]
 
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0.0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -236,7 +234,7 @@ def setUp(self):
             'data_layout': self.data_layout,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0.0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
diff --git a/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py
index 273b4a0198788..e682d1bef29d8 100755
--- a/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py
@@ -37,12 +37,12 @@ def bilinear_interp_test(
     align_corners=True,
     align_mode=0,
 ):
-    if isinstance(scale, float) or isinstance(scale, int):
+    if isinstance(scale, (float, int)):
         scale_list = []
         for _ in range(len(x.shape) - 2):
             scale_list.append(scale)
         scale = list(map(float, scale_list))
-    elif isinstance(scale, list) or isinstance(scale, tuple):
+    elif isinstance(scale, (list, tuple)):
         scale = list(map(float, scale))
     if SizeTensor is not None:
         if not isinstance(SizeTensor, list) and not isinstance(
@@ -169,7 +169,7 @@ def setUp(self):
         scale_h = 0
         scale_w = 0
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0.0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -210,7 +210,7 @@ def setUp(self):
             'data_layout': self.data_layout,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0.0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -363,7 +363,7 @@ def setUp(self):
         ).astype("uint8")
 
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -400,7 +400,7 @@ def setUp(self):
             'align_mode': self.align_mode,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -537,7 +537,7 @@ def setUp(self):
         if self.scale_by_1Dtensor:
             self.inputs['Scale'] = np.array([self.scale]).astype("float32")
         elif self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -564,7 +564,7 @@ def setUp(self):
         self.attrs['out_h'] = self.out_h
         self.attrs['out_w'] = self.out_w
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
diff --git a/python/paddle/fluid/tests/unittests/test_desc_clone.py b/python/paddle/fluid/tests/unittests/test_desc_clone.py
index 4d82ffb1f2671..16b4a6e38e9e2 100644
--- a/python/paddle/fluid/tests/unittests/test_desc_clone.py
+++ b/python/paddle/fluid/tests/unittests/test_desc_clone.py
@@ -114,9 +114,7 @@ def operator_equal(a, b):
         raise ValueError("In operator_equal not equal\n")
 
     for k, v in a.__dict__.items():
-        if isinstance(v, fluid.framework.Program) or isinstance(
-            v, fluid.framework.Block
-        ):
+        if isinstance(v, (fluid.framework.Program, fluid.framework.Block)):
             continue
 
         elif isinstance(v, core.OpDesc):
@@ -137,13 +135,10 @@ def operator_equal(a, b):
 
 def block_equal(a, b):
     for k, v in a.__dict__.items():
-        if (
-            isinstance(v, core.ProgramDesc)
-            or isinstance(v, fluid.framework.Program)
-            or isinstance(v, core.BlockDesc)
+        if isinstance(
+            v, (core.ProgramDesc, fluid.framework.Program, core.BlockDesc)
         ):
             continue
-
         elif k == "ops":
             assert len(a.ops) == len(b.ops)
             for i in range(0, len(a.ops)):
diff --git a/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py
index aa837010ccb28..9719c02b85e44 100755
--- a/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py
@@ -38,12 +38,12 @@ def linear_interp_test(
     align_corners=True,
     align_mode=0,
 ):
-    if isinstance(scale, float) or isinstance(scale, int):
+    if isinstance(scale, (float, int)):
         scale_list = []
         for _ in range(len(x.shape) - 2):
             scale_list.append(scale)
         scale = list(map(float, scale_list))
-    elif isinstance(scale, list) or isinstance(scale, tuple):
+    elif isinstance(scale, (list, tuple)):
         scale = list(map(float, scale))
     if SizeTensor is not None:
         if not isinstance(SizeTensor, list) and not isinstance(
@@ -138,7 +138,7 @@ def setUp(self):
         in_w = self.input_shape[1]
 
         if self.scale > 0:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 self.scale = float(self.scale)
             if isinstance(self.scale, list):
                 self.scale = float(self.scale[0])
@@ -170,7 +170,7 @@ def setUp(self):
             'data_layout': self.data_layout,
         }
         if self.scale > 0:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 self.scale = [float(self.scale)]
             self.attrs['scale'] = self.scale
         self.outputs = {'Out': output_np}
@@ -262,7 +262,7 @@ def setUp(self):
         in_w = self.input_shape[1]
 
         if self.scale > 0:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 self.scale = float(self.scale)
             if isinstance(self.scale, list):
                 self.scale = float(self.scale[0])
@@ -302,7 +302,7 @@ def setUp(self):
             'data_layout': self.data_layout,
         }
         if self.scale > 0:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
                 self.scale = [self.scale[0], self.scale[0]]
@@ -343,7 +343,7 @@ def setUp(self):
         input_np = np.random.random(self.input_shape).astype("uint8")
 
         if self.scale > 0:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 self.scale = float(self.scale)
             if isinstance(self.scale, list):
                 self.scale = float(self.scale[0])
@@ -371,7 +371,7 @@ def setUp(self):
             'align_mode': self.align_mode,
         }
         if self.scale > 0:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
                 self.scale = [self.scale[0], self.scale[0]]
diff --git a/python/paddle/fluid/tests/unittests/test_nearest_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_nearest_interp_v2_op.py
index 282d474adee98..7535e9d08892e 100755
--- a/python/paddle/fluid/tests/unittests/test_nearest_interp_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_nearest_interp_v2_op.py
@@ -39,12 +39,12 @@ def nearest_interp_test(
     align_corners=True,
     align_mode=0,
 ):
-    if isinstance(scale, float) or isinstance(scale, int):
+    if isinstance(scale, (float, int)):
         scale_list = []
         for _ in range(len(x.shape) - 2):
             scale_list.append(scale)
         scale = list(map(float, scale_list))
-    elif isinstance(scale, list) or isinstance(scale, tuple):
+    elif isinstance(scale, (list, tuple)):
         scale = list(map(float, scale))
     if SizeTensor is not None:
         if not isinstance(SizeTensor, list) and not isinstance(
@@ -233,7 +233,7 @@ def setUp(self):
         scale_h = 0
         scale_w = 0
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_d = scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -305,7 +305,7 @@ def setUp(self):
             'data_layout': self.data_layout,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -438,7 +438,7 @@ def setUp(self):
         ).astype("uint8")
 
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -472,7 +472,7 @@ def setUp(self):
             'align_corners': self.align_corners,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -583,7 +583,7 @@ def setUp(self):
         if self.scale_by_1Dtensor:
             self.inputs['Scale'] = np.array([self.scale]).astype("float64")
         elif self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -610,7 +610,7 @@ def setUp(self):
         self.attrs['out_h'] = self.out_h
         self.attrs['out_w'] = self.out_w
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
diff --git a/python/paddle/fluid/tests/unittests/test_trilinear_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_trilinear_interp_v2_op.py
index 98261ae38c90d..24b5355fc1ecb 100755
--- a/python/paddle/fluid/tests/unittests/test_trilinear_interp_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_trilinear_interp_v2_op.py
@@ -39,12 +39,12 @@ def trilinear_interp_test(
     align_corners=True,
     align_mode=0,
 ):
-    if isinstance(scale, float) or isinstance(scale, int):
+    if isinstance(scale, (float, int)):
         scale_list = []
         for _ in range(len(x.shape) - 2):
             scale_list.append(scale)
         scale = list(map(float, scale_list))
-    elif isinstance(scale, list) or isinstance(scale, tuple):
+    elif isinstance(scale, (list, tuple)):
         scale = list(map(float, scale))
     if SizeTensor is not None:
         if not isinstance(SizeTensor, list) and not isinstance(
@@ -219,7 +219,7 @@ def setUp(self):
         in_w = self.input_shape[3]
 
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_d = scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -270,7 +270,7 @@ def setUp(self):
             'data_layout': data_layout,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -434,7 +434,7 @@ def setUp(self):
         ).astype("uint8")
 
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_d = scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -477,7 +477,7 @@ def setUp(self):
             'align_mode': self.align_mode,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -611,7 +611,7 @@ def setUp(self):
         if self.scale_by_1Dtensor:
             self.inputs['Scale'] = np.array([self.scale]).astype("float32")
         elif self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_d = scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -642,7 +642,7 @@ def setUp(self):
         self.attrs['out_h'] = self.out_h
         self.attrs['out_w'] = self.out_w
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
diff --git a/python/paddle/fluid/tests/unittests/testsuite.py b/python/paddle/fluid/tests/unittests/testsuite.py
index ac6b065212d03..7db80c08eacff 100644
--- a/python/paddle/fluid/tests/unittests/testsuite.py
+++ b/python/paddle/fluid/tests/unittests/testsuite.py
@@ -71,7 +71,7 @@ def __create_var__(name, var_name):
 
 def set_input(scope, op, inputs, place):
     def __set_input__(var_name, var):
-        if isinstance(var, tuple) or isinstance(var, np.ndarray):
+        if isinstance(var, (tuple, np.ndarray)):
             tensor = scope.find_var(var_name).get_tensor()
             if isinstance(var, tuple):
                 tensor.set_recursive_sequence_lengths(var[1])
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_bilinear_interp_v2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_bilinear_interp_v2_op_xpu.py
index 81e08c000b7f4..5067baf57c999 100755
--- a/python/paddle/fluid/tests/unittests/xpu/test_bilinear_interp_v2_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_bilinear_interp_v2_op_xpu.py
@@ -141,7 +141,7 @@ def setUp(self):
         scale_h = 0
         scale_w = 0
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0.0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -182,7 +182,7 @@ def setUp(self):
             'data_layout': self.data_layout,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0.0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -389,7 +389,7 @@ def setUp(self):
         if self.scale_by_1Dtensor:
             self.inputs['Scale'] = np.array([self.scale]).astype("float32")
         elif self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -416,7 +416,7 @@ def setUp(self):
         self.attrs['out_h'] = self.out_h
         self.attrs['out_w'] = self.out_w
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_nearest_interp_v2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_nearest_interp_v2_op_xpu.py
index 69bf56a2c4410..7a9150312c9f7 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_nearest_interp_v2_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_nearest_interp_v2_op_xpu.py
@@ -212,7 +212,7 @@ def setUp(self):
         scale_h = 0
         scale_w = 0
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_d = scale_h = scale_w = float(self.scale)
                     self.scale = [self.scale]
@@ -450,7 +450,7 @@ def setUp(self):
         if self.scale_by_1Dtensor:
             self.inputs['Scale'] = np.array([self.scale]).astype("float32")
         elif self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -477,7 +477,7 @@ def setUp(self):
         self.attrs['out_h'] = self.out_h
         self.attrs['out_w'] = self.out_w
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
diff --git a/python/paddle/hapi/progressbar.py b/python/paddle/hapi/progressbar.py
index 88dcdf7944b76..f9db60d04aa6d 100644
--- a/python/paddle/hapi/progressbar.py
+++ b/python/paddle/hapi/progressbar.py
@@ -81,11 +81,7 @@ def convert_uint16_to_float(in_list):
 
         for i, (k, val) in enumerate(values):
             if k == "loss":
-                val = (
-                    val
-                    if isinstance(val, list) or isinstance(val, np.ndarray)
-                    else [val]
-                )
+                val = val if isinstance(val, (list, np.ndarray)) else [val]
                 if isinstance(val[0], np.uint16):
                     values[i] = ("loss", list(convert_uint16_to_float(val)))
diff --git a/python/paddle/incubate/distributed/fleet/utils.py b/python/paddle/incubate/distributed/fleet/utils.py
index db392f15214e4..4b47d203e97c2 100644
--- a/python/paddle/incubate/distributed/fleet/utils.py
+++ b/python/paddle/incubate/distributed/fleet/utils.py
@@ -192,7 +192,7 @@ def load_var(var_name, shape_list, dtype, save_path):
 
     def reader(batch_size, fn, dim):
         data = []
-        if isinstance(dim, list) or isinstance(dim, tuple):
+        if isinstance(dim, (list, tuple)):
             shape = list(dim)
             _temp = 1
             for x in dim:
diff --git a/python/paddle/jit/api.py b/python/paddle/jit/api.py
index bd606ebb01483..beca6231f75b1 100644
--- a/python/paddle/jit/api.py
+++ b/python/paddle/jit/api.py
@@ -915,9 +915,7 @@ def fun(inputs):
         )
 
     if not (
-        isinstance(layer, Layer)
-        or inspect.isfunction(layer)
-        or isinstance(layer, StaticFunction)
+        isinstance(layer, (Layer, StaticFunction)) or inspect.isfunction(layer)
     ):
         raise TypeError(
             "The input of paddle.jit.save should be 'Layer' or 'Function', but received input type is %s."
diff --git a/python/paddle/jit/dy2static/static_analysis.py b/python/paddle/jit/dy2static/static_analysis.py
index a8d810b2b81e1..4b6eaba6a75fb 100644
--- a/python/paddle/jit/dy2static/static_analysis.py
+++ b/python/paddle/jit/dy2static/static_analysis.py
@@ -179,9 +179,7 @@ def dfs_visit(self, node):
                 self.ancestor_wrappers.append(cur_wrapper)
 
         for child in gast.iter_child_nodes(node):
-            if isinstance(child, gast.FunctionDef) or isinstance(
-                child, gast.AsyncFunctionDef
-            ):
+            if isinstance(child, (gast.FunctionDef, gast.AsyncFunctionDef)):
                 # TODO: current version is function name mapping to its type
                 # consider complex case involving parameters
                 self.var_env.enter_scope(
diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py
index 8968e74803f17..e3ea8faf810b2 100644
--- a/python/paddle/nn/functional/common.py
+++ b/python/paddle/nn/functional/common.py
@@ -399,9 +399,7 @@ def interpolate(
     if size is None and scale_factor is None:
         raise ValueError("One of size and scale_factor must not be None.")
 
-    if (isinstance(size, list) or isinstance(size, tuple)) and len(
-        size
-    ) != x.ndim - 2:
+    if isinstance(size, (tuple, list)) and (len(size) != x.ndim - 2):
         raise ValueError(
             'The x and size should satisfy rank(x) - 2 == len(size).'
         )
@@ -427,11 +425,7 @@
     )
 
     if resample == 'AREA':
-        if (
-            isinstance(size, list)
-            or isinstance(size, tuple)
-            or isinstance(size, Variable)
-        ):
+        if isinstance(size, (list, tuple, Variable)):
             if len(size) == 0:
                 raise ValueError("output size can not be empty")
         if size is None:
@@ -464,7 +458,7 @@
     )
 
     def _is_list_or_turple_(data):
-        return isinstance(data, list) or isinstance(data, tuple)
+        return isinstance(data, (list, tuple))
 
     if data_format == 'NCHW' or data_format == 'NCDHW' or data_format == 'NCW':
         data_layout = 'NCHW'
@@ -581,18 +575,14 @@ def _is_list_or_turple_(data):
         if isinstance(scale, Variable):
             scale.stop_gradient = True
             inputs["Scale"] = scale
-        elif (
-            isinstance(scale, float)
-            or isinstance(scale, int)
-            or isinstance(scale, numpy.ndarray)
-        ):
+        elif isinstance(scale, (float, int, numpy.ndarray)):
             if scale <= 0:
                 raise ValueError("Attr(scale) should be greater than zero.")
             scale_list = []
             for i in range(len(x.shape) - 2):
                 scale_list.append(scale)
             attrs['scale'] = list(map(float, scale_list))
-        elif isinstance(scale, list) or isinstance(scale, tuple):
+        elif isinstance(scale, (list, tuple)):
             if len(scale) != len(x.shape) - 2:
                 raise ValueError(
                     "scale_shape length should be {} for "
@@ -2275,7 +2265,7 @@ def fold(
     assert len(x.shape) == 3, "input should be the format of [N, C, L]"
 
     def _is_list_or_turple_(data):
-        return isinstance(data, list) or isinstance(data, tuple)
+        return isinstance(data, (list, tuple))
 
     if isinstance(output_sizes, int):
         output_sizes = [output_sizes, output_sizes]
diff --git a/python/paddle/nn/initializer/constant.py b/python/paddle/nn/initializer/constant.py
index 0016467f117b0..d58aa653cb6e6 100644
--- a/python/paddle/nn/initializer/constant.py
+++ b/python/paddle/nn/initializer/constant.py
@@ -50,9 +50,7 @@ def forward(self, var, block=None):
         """
         block = self._check_block(block)
 
-        assert isinstance(var, framework.Variable) or isinstance(
-            var, framework.EagerParamBase
-        )
+        assert isinstance(var, (framework.Variable, framework.EagerParamBase))
         assert isinstance(block, framework.Block)
 
         if in_dygraph_mode():
diff --git a/python/paddle/nn/layer/layers.py b/python/paddle/nn/layer/layers.py
index 5f3b765f77c92..b1f184469faad 100644
--- a/python/paddle/nn/layer/layers.py
+++ b/python/paddle/nn/layer/layers.py
@@ -1860,7 +1860,7 @@ def _check_match(key, param):
                 raise ValueError(
                     "{} is not found in the provided dict.".format(key)
                 )
-            if isinstance(state, dict) or isinstance(state, list):
+            if isinstance(state, (dict, list)):
                 if len(state) != len(param):
                     missing_keys.append(key)
                     raise ValueError(
diff --git a/python/paddle/nn/layer/rnn.py b/python/paddle/nn/layer/rnn.py
index 904a8d2c11aac..c9945506fd9b1 100644
--- a/python/paddle/nn/layer/rnn.py
+++ b/python/paddle/nn/layer/rnn.py
@@ -593,7 +593,7 @@ def get_initial_states(
 
         def _is_shape_sequence(seq):
            """For shape, list/tuple of integer is the finest-grained objection"""
-            if isinstance(seq, list) or isinstance(seq, tuple):
+            if isinstance(seq, (list, tuple)):
                 if reduce(
                     lambda flag, x: isinstance(x, int) and flag, seq, True
                 ):
diff --git a/python/paddle/nn/quant/quant_layers.py b/python/paddle/nn/quant/quant_layers.py
index 9e7b4c55ba49e..49c9e0a3f4b68 100644
--- a/python/paddle/nn/quant/quant_layers.py
+++ b/python/paddle/nn/quant/quant_layers.py
@@ -1087,11 +1087,7 @@ def __init__(
     def forward(self, *inputs, **kwargs):
         out = self._layer(*inputs, **kwargs)
         # TODO (jc): support the ops of several outputs
-        if (
-            isinstance(out, list)
-            or isinstance(out, tuple)
-            or isinstance(out, dict)
-        ):
+        if isinstance(out, (list, tuple, dict)):
             return out
         else:
             return self._ma_output_scale(out)
@@ -1129,7 +1125,7 @@ def __init__(
     def forward(self, *inputs, **kwargs):
         out = self._layer(*inputs, **kwargs)
         # TODO (jc): support the ops of several outputs
-        if (isinstance(out, list) or isinstance(out, tuple)) and len(out) > 1:
+        if (isinstance(out, (list, tuple))) and len(out) > 1:
             return out
         else:
             return self._fake_quant_output(out)
diff --git a/python/paddle/optimizer/lr.py b/python/paddle/optimizer/lr.py
index cd94737e7a0e7..4a5dd476fa14f 100644
--- a/python/paddle/optimizer/lr.py
+++ b/python/paddle/optimizer/lr.py
@@ -791,11 +791,7 @@ def __init__(
         last_epoch=-1,
         verbose=False,
     ):
-        type_check = (
-            isinstance(learning_rate, float)
-            or isinstance(learning_rate, int)
-            or isinstance(learning_rate, LRScheduler)
-        )
+        type_check = isinstance(learning_rate, (float, int, LRScheduler))
         if not type_check:
             raise TypeError(
                 "the type of learning_rate should be [int, float or LRScheduler], the current type is {}".format(
diff --git a/python/paddle/static/nn/common.py b/python/paddle/static/nn/common.py
index a520cf06a0c11..05e76601feaea 100644
--- a/python/paddle/static/nn/common.py
+++ b/python/paddle/static/nn/common.py
@@ -961,13 +961,11 @@ def conv2d(
 
     # padding
     def _update_padding(padding, data_format):
-        def is_list_or_tuple(ele):
-            if isinstance(ele, list) or isinstance(ele, tuple):
-                return True
-            return False
 
-        if is_list_or_tuple(padding) and len(padding) == 4:
-            if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
+        if isinstance(padding, (list, tuple)) and len(padding) == 4:
+            if isinstance(padding[0], (list, tuple)) and (
+                data_format == "NCHW"
+            ):
                 if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -975,7 +973,9 @@ def is_list_or_tuple(ele):
                     )
                 padding = padding[2:4]
                 padding = [ele for a_list in padding for ele in a_list]
-            elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
+            elif isinstance(padding[0], (list, tuple)) and (
+                data_format == "NHWC"
+            ):
                 if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -1257,13 +1257,11 @@ def conv3d(
     dilation = paddle.utils.convert_to_list(dilation, 3, 'dilation')
 
     def _update_padding(padding, data_format):
-        def is_list_or_tuple(ele):
-            if isinstance(ele, list) or isinstance(ele, tuple):
-                return True
-            return False
 
-        if is_list_or_tuple(padding) and len(padding) == 5:
-            if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
+        if isinstance(padding, (list, tuple)) and len(padding) == 5:
+            if isinstance(padding[0], (list, tuple)) and (
+                data_format == "NCDHW"
+            ):
                 if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -1271,7 +1269,9 @@ def is_list_or_tuple(ele):
                     )
                 padding = padding[2:5]
                 padding = [ele for a_list in padding for ele in a_list]
-            elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
+            elif isinstance(padding[0], (list, tuple)) and (
+                data_format == "NDHWC"
+            ):
                 if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -1282,7 +1282,7 @@ def is_list_or_tuple(ele):
                 padding = [ele for a_list in padding for ele in a_list]
             padding = paddle.utils.convert_to_list(padding, 6, 'padding')
             if paddle.utils._is_symmetric_padding(padding, 3):
                 padding = [padding[0], padding[2], padding[4]]
-        elif is_list_or_tuple(padding) and len(padding) == 6:
+        elif isinstance(padding, (list, tuple)) and len(padding) == 6:
             padding = paddle.utils.convert_to_list(padding, 6, 'padding')
             if paddle.utils._is_symmetric_padding(padding, 3):
                 padding = [padding[0], padding[2], padding[4]]
@@ -1580,13 +1580,11 @@ def conv2d_transpose(
         raise ValueError("use_cudnn should be True or False")
 
     def _update_padding(padding, data_format):
-        def is_list_or_tuple(ele):
-            if isinstance(ele, list) or isinstance(ele, tuple):
-                return True
-            return False
 
-        if is_list_or_tuple(padding) and len(padding) == 4:
-            if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
+        if isinstance(padding, (list, tuple)) and len(padding) == 4:
+            if isinstance(padding[0], (list, tuple)) and (
+                data_format == "NCHW"
+            ):
                 if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -1594,7 +1592,9 @@ def is_list_or_tuple(ele):
                     )
                 padding = padding[2:4]
                 padding = [ele for a_list in padding for ele in a_list]
-            elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
+            elif isinstance(padding[0], (list, tuple)) and (
+                data_format == "NHWC"
+            ):
                 if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -1951,13 +1951,11 @@ def conv3d_transpose(
         raise ValueError("use_cudnn should be True or False")
 
     def _update_padding(padding, data_format):
-        def is_list_or_tuple(ele):
-            if isinstance(ele, list) or isinstance(ele, tuple):
-                return True
-            return False
 
-        if is_list_or_tuple(padding) and len(padding) == 5:
-            if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
+        if isinstance(padding, (list, tuple)) and len(padding) == 5:
+            if isinstance(padding[0], (list, tuple)) and (
+                data_format == "NCDHW"
+            ):
                 if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -1965,7 +1963,9 @@ def is_list_or_tuple(ele):
                     )
                 padding = padding[2:5]
                 padding = [ele for a_list in padding for ele in a_list]
-            elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
+            elif isinstance(padding[0], (list, tuple)) and (
+                data_format == "NDHWC"
+            ):
                 if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -1975,7 +1975,7 @@ def is_list_or_tuple(ele):
             padding = [ele for a_list in padding for ele in a_list]
             padding = paddle.utils.convert_to_list(padding, 6, 'padding')
-        elif is_list_or_tuple(padding) and len(padding) == 6:
+        elif isinstance(padding, (list, tuple)) and len(padding) == 6:
             padding = paddle.utils.convert_to_list(padding, 6, 'padding')
         else:
diff --git a/python/paddle/static/quantization/quantization_pass.py b/python/paddle/static/quantization/quantization_pass.py
index 8ca9da260ae6f..f483dad45ea7d 100644
--- a/python/paddle/static/quantization/quantization_pass.py
+++ b/python/paddle/static/quantization/quantization_pass.py
@@ -1438,12 +1438,7 @@ def _dequantized_var_name(self, var_name):
         return "%s.dequantized" % (var_name)
 
     def _is_float(self, v):
-        return (
-            isinstance(v, float)
-            or isinstance(v, np.float16)
-            or isinstance(v, np.float32)
-            or isinstance(v, np.float64)
-        )
+        return isinstance(v, (float, np.float16, np.float32, np.float64))
 
 
 class ConvertToInt8Pass:
diff --git a/python/paddle/static/quantization/tests/test_imperative_ptq.py b/python/paddle/static/quantization/tests/test_imperative_ptq.py
index 04fc5bc9bc946..fff6087400376 100644
--- a/python/paddle/static/quantization/tests/test_imperative_ptq.py
+++ b/python/paddle/static/quantization/tests/test_imperative_ptq.py
@@ -60,10 +60,7 @@ def test_fuse(self):
         quant_h = ptq.quantize(model_h, fuse=True, fuse_list=f_l)
         for name, layer in quant_model.named_sublayers():
             if name in f_l:
-                assert not (
-                    isinstance(layer, nn.BatchNorm1D)
-                    or isinstance(layer, nn.BatchNorm2D)
-                )
+                assert not (isinstance(layer, (nn.BatchNorm1D, nn.BatchNorm2D)))
         out = model(inputs)
         out_h = model_h(inputs)
         out_quant = quant_model(inputs)
@@ -294,10 +291,7 @@ def func_ptq(self):
         quant_model = self.ptq.quantize(model, fuse=True, fuse_list=f_l)
         for name, layer in quant_model.named_sublayers():
             if name in f_l:
-                assert not (
-                    isinstance(layer, nn.BatchNorm1D)
-                    or isinstance(layer, nn.BatchNorm2D)
-                )
+                assert not (isinstance(layer, (nn.BatchNorm1D, nn.BatchNorm2D)))
         before_acc_top1 = self.model_test(
             quant_model, self.batch_num, self.batch_size
         )
diff --git a/python/paddle/tensor/layer_function_generator.py b/python/paddle/tensor/layer_function_generator.py
index 2fb1656680e33..fd1cf73d37f42 100644
--- a/python/paddle/tensor/layer_function_generator.py
+++ b/python/paddle/tensor/layer_function_generator.py
@@ -238,11 +238,7 @@ def func(*args, **kwargs):
         outputs = {}
         out = kwargs.pop(_convert_(o_name), [])
         if out:
-            out_var = (
-                out[0]
-                if (isinstance(out, list) or isinstance(out, tuple))
-                else out
-            )
+            out_var = out[0] if isinstance(out, (list, tuple)) else out
         else:
             out_var = helper.create_variable_for_type_inference(dtype=dtype)
         outputs[o_name] = [out_var]
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 7e3b561064e83..88c0fbcdd0fa9 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -1651,7 +1651,7 @@ def add_n(inputs, name=None):
     else:
         helper = LayerHelper('add_n', **locals())
         check_type(inputs, 'inputs', (Variable, tuple, list), 'add_n')
-        if isinstance(inputs, list) or isinstance(inputs, tuple):
+        if isinstance(inputs, (list, tuple)):
             if len(inputs) > 0:
                 for input in inputs:
                     check_variable_and_dtype(
diff --git a/python/paddle/vision/ops.py b/python/paddle/vision/ops.py
index ac4e018fa2713..a72a87b1ebd86 100755
--- a/python/paddle/vision/ops.py
+++ b/python/paddle/vision/ops.py
@@ -495,7 +495,7 @@ def prior_box(
     """
 
     def _is_list_or_tuple_(data):
-        return isinstance(data, list) or isinstance(data, tuple)
+        return isinstance(data, (list, tuple))
 
     if not _is_list_or_tuple_(min_sizes):
         min_sizes = [min_sizes]
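
Note on the idiom: every hunk in this patch is the same mechanical rewrite. Python's isinstance accepts a tuple of types as its second argument and returns True when the value matches any of them, so a chain of or-ed single-type checks collapses into one call with identical behavior. A minimal sketch of the equivalence, using an illustrative variable not taken from any file above:

    scale = 2
    # Old style: one isinstance call per candidate type.
    old = isinstance(scale, float) or isinstance(scale, int)
    # New style: a single call with a tuple of types.
    new = isinstance(scale, (float, int))
    assert old == new  # holds for any value of scale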