From 0e878e52195e8e02afc332d30eec89f111eb1327 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=98=A5=E4=B9=94?= <83450930+Liyulingyue@users.noreply.github.com> Date: Fri, 22 Sep 2023 10:14:38 +0800 Subject: [PATCH] [CodeStyle][task 1] enable Ruff UP032 rule with . except `python/paddle/base` (#57409) * update up032 * update up032 * Update api_gen.py * Update api_gen.py * Update sampcd_processor_utils.py --- paddle/phi/api/yaml/generator/api_gen.py | 4 +- .../api/yaml/generator/backward_api_gen.py | 4 +- paddle/phi/api/yaml/generator/dist_api_gen.py | 8 +- .../phi/api/yaml/generator/dist_bw_api_gen.py | 8 +- .../phi/api/yaml/generator/sparse_api_gen.py | 4 +- .../api/yaml/generator/sparse_bw_api_gen.py | 4 +- .../phi/api/yaml/generator/strings_api_gen.py | 4 +- .../generate_kernels.py | 14 +- pyproject.toml | 324 ------------------ python/paddle/amp/accuracy_compare.py | 20 +- python/paddle/audio/backends/init_backend.py | 4 +- python/paddle/audio/backends/wave_backend.py | 4 +- python/paddle/batch.py | 2 +- python/paddle/dataset/common.py | 4 +- python/paddle/device/cuda/__init__.py | 4 +- .../distributed/auto_parallel/interface.py | 16 +- .../auto_parallel/static/completion.py | 8 +- .../auto_parallel/static/converter.py | 16 +- .../auto_parallel/static/cost/base_cost.py | 12 +- .../auto_parallel/static/cost/tensor_cost.py | 8 +- .../auto_parallel/static/cost_model.py | 8 +- .../auto_parallel/static/dist_context.py | 6 +- .../auto_parallel/static/dist_op.py | 4 +- .../auto_parallel/static/dist_tensor.py | 8 +- .../auto_parallel/static/engine.py | 20 +- .../distributed/auto_parallel/static/graph.py | 4 +- .../dist_check_finite_and_unscale.py | 4 +- .../static/operators/dist_default.py | 28 +- .../static/operators/dist_embedding.py | 16 +- .../static/operators/dist_matmul.py | 142 +++----- .../static/operators/dist_pnorm.py | 24 +- .../static/operators/dist_reduce_sum_p.py | 12 +- .../static/operators/dist_reshape.py | 36 +- .../operators/dist_update_loss_scaling.py | 4 +- .../auto_parallel/static/partitioner.py | 12 +- .../auto_parallel/static/reshard.py | 23 +- .../auto_parallel/static/tuner/algorithms.py | 4 +- .../static/tuner/optimization_tuner.py | 4 +- .../auto_parallel/static/tuner/recorder.py | 4 +- .../static/tuner/rule_based_tuner.py | 12 +- .../static/tuner/tunable_variable.py | 8 +- .../distributed/auto_parallel/static/utils.py | 62 ++-- .../distributed/auto_parallel/strategy.py | 4 +- python/paddle/distributed/cloud_utils.py | 22 +- .../paddle/distributed/communication/group.py | 12 +- .../communication/stream/gather.py | 4 +- .../fleet/base/orthogonal_strategy.py | 8 +- .../distributed/fleet/base/role_maker.py | 8 +- .../distributed/fleet/base/strategy_group.py | 4 +- .../distributed/fleet/base/util_factory.py | 8 +- .../paddle/distributed/fleet/cloud_utils.py | 14 +- .../distributed/fleet/elastic/manager.py | 28 +- python/paddle/distributed/fleet/launch.py | 4 +- .../paddle/distributed/fleet/launch_utils.py | 60 +--- .../distributed/fleet/layers/mpu/mp_layers.py | 12 +- .../distributed/fleet/layers/mpu/mp_ops.py | 28 +- .../dygraph_sharding_optimizer.py | 12 +- .../meta_optimizers/meta_optimizer_base.py | 8 +- .../meta_optimizers/sharding/fp16_helper.py | 4 +- .../fleet/meta_optimizers/sharding/utils.py | 36 +- .../sharding/weight_decay_helper.py | 2 +- .../meta_optimizers/sharding_optimizer.py | 12 +- .../parallel_layers/pp_layers.py | 16 +- .../fleet/meta_parallel/pipeline_parallel.py | 4 +- .../fleet/recompute/recompute_hybrid.py | 4 +- 
.../fleet/runtime/parameter_server_runtime.py | 8 +- .../distributed/fleet/runtime/the_one_ps.py | 8 +- python/paddle/distributed/fleet/utils/fs.py | 8 +- .../fleet/utils/hybrid_parallel_inference.py | 10 +- .../fleet/utils/pp_parallel_adaptor.py | 6 +- .../fleet/utils/sequence_parallel_utils.py | 12 +- .../fleet/utils/tensor_parallel_utils.py | 8 +- .../distributed/launch/context/device.py | 4 +- .../launch/controllers/collective.py | 4 +- python/paddle/distributed/launch/job/pod.py | 4 +- .../distributed/launch/plugins/__init__.py | 4 +- python/paddle/distributed/parallel.py | 8 +- .../distributed/passes/auto_parallel_amp.py | 12 +- ...uto_parallel_data_parallel_optimization.py | 14 +- .../distributed/passes/auto_parallel_fp16.py | 16 +- .../passes/auto_parallel_grad_clip.py | 4 +- .../passes/auto_parallel_pipeline.py | 2 +- .../passes/auto_parallel_sharding.py | 13 +- .../passes/pipeline_scheduler_pass.py | 4 +- python/paddle/distributed/ps/coordinator.py | 8 +- python/paddle/distributed/ps/the_one_ps.py | 6 +- python/paddle/distributed/ps/utils/public.py | 12 +- .../paddle/distributed/utils/launch_utils.py | 16 +- python/paddle/distributed/utils/nccl_utils.py | 4 +- python/paddle/fft.py | 40 +-- python/paddle/framework/io.py | 24 +- python/paddle/framework/random.py | 8 +- python/paddle/hapi/callbacks.py | 4 +- python/paddle/hapi/dynamic_flops.py | 4 +- python/paddle/hapi/hub.py | 16 +- python/paddle/hapi/model_summary.py | 6 +- python/paddle/hapi/progressbar.py | 4 +- python/paddle/incubate/asp/asp.py | 6 +- .../incubate/asp/supported_layer_list.py | 6 +- python/paddle/incubate/asp/utils.py | 8 +- .../incubate/distributed/fleet/fleet_util.py | 28 +- .../distributed_strategy.py | 4 +- .../fleet/parameter_server/ir/trainer_pass.py | 12 +- .../incubate/distributed/fleet/utils.py | 8 +- .../incubate/nn/layer/fused_dropout_nd.py | 4 +- .../incubate/nn/layer/fused_transformer.py | 36 +- .../incubate/optimizer/functional/bfgs.py | 4 +- .../incubate/optimizer/functional/lbfgs.py | 4 +- .../incubate/optimizer/gradient_merge.py | 4 +- python/paddle/incubate/optimizer/pipeline.py | 32 +- python/paddle/incubate/optimizer/recompute.py | 20 +- python/paddle/incubate/passes/ir.py | 19 +- python/paddle/io/dataloader/batch_sampler.py | 16 +- python/paddle/io/dataloader/collate.py | 2 +- .../paddle/io/dataloader/dataloader_iter.py | 15 +- python/paddle/io/dataloader/flat.py | 4 +- python/paddle/io/dataloader/sampler.py | 4 +- python/paddle/io/dataloader/worker.py | 4 +- python/paddle/io/reader.py | 4 +- python/paddle/jit/api.py | 4 +- .../paddle/jit/dy2static/base_transformer.py | 8 +- .../jit/dy2static/basic_api_transformer.py | 4 +- .../paddle/jit/dy2static/convert_operators.py | 2 +- .../jit/dy2static/decorator_transformer.py | 4 +- python/paddle/jit/dy2static/error.py | 8 +- python/paddle/jit/dy2static/function_spec.py | 8 +- .../jit/dy2static/logical_transformer.py | 8 +- python/paddle/jit/dy2static/origin_info.py | 8 +- python/paddle/jit/dy2static/utils.py | 28 +- .../jit/dy2static/variable_trans_func.py | 12 +- python/paddle/metric/metrics.py | 16 +- python/paddle/nn/functional/activation.py | 10 +- python/paddle/nn/functional/common.py | 30 +- python/paddle/nn/functional/conv.py | 104 ++---- python/paddle/nn/functional/extension.py | 2 +- python/paddle/nn/functional/input.py | 4 +- python/paddle/nn/functional/loss.py | 56 ++- python/paddle/nn/functional/norm.py | 10 +- python/paddle/nn/functional/pooling.py | 12 +- python/paddle/nn/functional/vision.py | 22 +- 
python/paddle/nn/initializer/initializer.py | 4 +- python/paddle/nn/layer/activation.py | 12 +- python/paddle/nn/layer/common.py | 28 +- python/paddle/nn/layer/container.py | 4 +- python/paddle/nn/layer/layers.py | 14 +- python/paddle/nn/layer/loss.py | 4 +- python/paddle/nn/layer/norm.py | 22 +- python/paddle/nn/layer/pooling.py | 12 +- python/paddle/nn/layer/rnn.py | 10 +- python/paddle/nn/layer/transformer.py | 46 +-- python/paddle/nn/layer/vision.py | 6 +- python/paddle/nn/utils/spectral_norm_hook.py | 4 +- python/paddle/nn/utils/weight_norm_hook.py | 2 +- python/paddle/onnx/export.py | 4 +- python/paddle/optimizer/lr.py | 16 +- python/paddle/optimizer/optimizer.py | 20 +- python/paddle/profiler/profiler.py | 20 +- python/paddle/profiler/profiler_statistic.py | 4 +- python/paddle/profiler/timer.py | 5 +- python/paddle/signal.py | 16 +- python/paddle/sparse/creation.py | 8 +- python/paddle/sparse/nn/functional/conv.py | 28 +- python/paddle/sparse/unary.py | 4 +- python/paddle/static/amp/bf16/amp_utils.py | 16 +- python/paddle/static/amp/fp16_utils.py | 4 +- python/paddle/static/io.py | 20 +- python/paddle/static/nn/common.py | 36 +- python/paddle/static/nn/control_flow.py | 14 +- .../static/quantization/quantization_pass.py | 4 +- python/paddle/tensor/linalg.py | 30 +- python/paddle/tensor/manipulation.py | 42 +-- python/paddle/tensor/math.py | 12 +- python/paddle/tensor/to_string.py | 12 +- .../utils/cpp_extension/cpp_extension.py | 8 +- .../utils/cpp_extension/extension_utils.py | 28 +- python/paddle/utils/deprecated.py | 6 +- python/paddle/utils/dlpack.py | 4 +- python/paddle/utils/download.py | 30 +- python/paddle/utils/install_check.py | 16 +- python/paddle/utils/layers_utils.py | 2 +- python/paddle/vision/datasets/cifar.py | 4 +- python/paddle/vision/datasets/flowers.py | 4 +- python/paddle/vision/datasets/mnist.py | 4 +- python/paddle/vision/datasets/voc2012.py | 4 +- python/paddle/vision/image.py | 8 +- python/paddle/vision/models/densenet.py | 4 +- python/paddle/vision/models/mobilenetv3.py | 4 +- python/paddle/vision/models/squeezenet.py | 4 +- .../vision/transforms/functional_tensor.py | 4 +- python/paddle/vision/transforms/transforms.py | 12 +- test/book/test_word2vec_book.py | 4 +- test/cinn/op_mappers/op_mapper_test.py | 8 +- test/cinn/passes/pass_test.py | 8 +- test/cinn/test_paddle_model_convertor.py | 4 +- .../fleet/parallel_dygraph_se_resnext.py | 4 +- .../fleet/test_parallel_dygraph_pp_adaptor.py | 8 +- .../test_multi_precision_fp16_train.py | 12 +- .../api/full_ILSVRC2012_val_preprocess.py | 4 +- .../cpp_extension/test_cpp_extension_setup.py | 4 +- .../test_mixed_extension_setup.py | 16 +- test/cpp_extension/utils.py | 8 +- test/custom_kernel/test_custom_kernel_dot.py | 8 +- test/custom_kernel/test_custom_kernel_load.py | 8 +- test/custom_op/test_context_pool.py | 4 +- test/custom_op/test_custom_attrs_jit.py | 4 +- test/custom_op/test_custom_cast_op_jit.py | 4 +- test/custom_op/test_custom_concat.py | 4 +- test/custom_op/test_custom_relu_op_jit.py | 12 +- test/custom_op/test_custom_relu_op_setup.py | 8 +- .../test_custom_relu_op_xpu_setup.py | 4 +- test/custom_op/test_custom_simple_slice.py | 4 +- test/custom_op/test_custom_tensor_operator.py | 4 +- test/custom_op/utils.py | 8 +- .../test_collective_process_group_xccl.py | 4 +- test/custom_runtime/test_custom_cpu_plugin.py | 4 +- .../test_custom_cpu_profiler_plugin.py | 4 +- .../test_custom_cpu_to_static.py | 4 +- test/custom_runtime/test_custom_op_setup.py | 24 +- .../distributed_passes/dist_pass_test_base.py | 4 +- 
test/dygraph_to_static/test_break_continue.py | 4 +- test/dygraph_to_static/test_build_strategy.py | 12 +- test/dygraph_to_static/test_cache_program.py | 4 +- test/dygraph_to_static/test_cast.py | 12 +- test/dygraph_to_static/test_container.py | 4 +- test/dygraph_to_static/test_convert_call.py | 4 +- test/dygraph_to_static/test_dict.py | 8 +- test/dygraph_to_static/test_error.py | 12 +- test/dygraph_to_static/test_fetch_feed.py | 4 +- test/dygraph_to_static/test_lac.py | 4 +- test/dygraph_to_static/test_layer_hook.py | 4 +- test/dygraph_to_static/test_list.py | 4 +- test/dygraph_to_static/test_logical.py | 8 +- test/dygraph_to_static/test_lstm.py | 12 +- test/dygraph_to_static/test_mnist.py | 4 +- test/dygraph_to_static/test_mnist_amp.py | 4 +- .../dygraph_to_static/test_mnist_pure_fp16.py | 4 +- test/dygraph_to_static/test_mobile_net.py | 8 +- test/dygraph_to_static/test_pylayer.py | 4 +- test/dygraph_to_static/test_resnet.py | 24 +- test/dygraph_to_static/test_resnet_amp.py | 8 +- .../test_resnet_pure_fp16.py | 8 +- test/dygraph_to_static/test_resnet_v2.py | 24 +- test/dygraph_to_static/test_se_resnet.py | 8 +- test/dygraph_to_static/test_seq2seq.py | 8 +- test/dygraph_to_static/yolov3.py | 4 +- test/fft/spectral_op_np.py | 4 +- .../test_trt_convert_multiclass_nms.py | 4 +- .../test_trt_convert_multiclass_nms3.py | 4 +- test/ir/inference/test_trt_pool3d_op.py | 12 +- test/ir/inference/test_trt_pool_op.py | 4 +- test/legacy_test/auto_parallel_autoconvert.py | 16 +- test/legacy_test/benchmark.py | 8 +- test/legacy_test/dist_fleet_ctr.py | 8 +- test/legacy_test/dist_fleet_ctr_ps_gpu.py | 4 +- test/legacy_test/dist_fleet_simnet_bow.py | 4 +- .../dist_fleet_sparse_embedding_ctr.py | 6 +- .../legacy_test/dist_fleet_sync_batch_norm.py | 8 +- test/legacy_test/dist_se_resnext.py | 4 +- test/legacy_test/fleet_meta_optimizer_base.py | 4 +- test/legacy_test/gradient_checker.py | 8 +- test/legacy_test/test_chunk_eval_op.py | 6 +- test/legacy_test/test_detach.py | 4 +- test/legacy_test/test_dist_base.py | 13 +- test/legacy_test/test_dist_fleet_base.py | 8 +- .../test_eager_deletion_delete_vars.py | 10 +- test/legacy_test/test_fused_dropout_add_op.py | 4 +- .../legacy_test/test_generate_proposals_op.py | 4 +- test/legacy_test/test_generator_dataloader.py | 6 +- test/legacy_test/test_imperative_resnet.py | 4 +- .../legacy_test/test_imperative_se_resnext.py | 4 +- test/legacy_test/test_inplace.py | 28 +- test/legacy_test/test_layers.py | 8 +- test/legacy_test/test_lstm_cudnn_op.py | 2 +- test/legacy_test/test_multi_dot_op.py | 4 +- ...cess_dataloader_iterable_dataset_static.py | 8 +- .../test_multiprocess_dataloader_static.py | 8 +- test/legacy_test/test_ops_nms.py | 4 +- test/legacy_test/test_pylayer_op.py | 4 +- test/legacy_test/test_run.py | 10 +- test/legacy_test/test_sample_logits_op.py | 12 +- test/legacy_test/test_signal.py | 39 +-- test/legacy_test/test_static_save_load.py | 8 +- test/legacy_test/test_sync_batch_norm_op.py | 8 +- test/legacy_test/test_translated_layer.py | 8 +- test/legacy_test/test_tril_triu_op.py | 12 +- test/legacy_test/test_variable.py | 32 +- .../test_view_op_reuse_allocation.py | 4 +- ...st_onnx_format_quantization_mobilenetv1.py | 4 +- test/ps/static_gpubox_trainer.py | 4 +- ...t2_int8_image_classification_comparison.py | 14 +- test/quantization/quant2_int8_lstm_model.py | 12 +- ...nt_int8_image_classification_comparison.py | 12 +- test/quantization/test_imperative_ptq.py | 4 +- test/quantization/test_imperative_qat_amp.py | 4 +- 
...t_post_training_quantization_lstm_model.py | 18 +- .../test_post_training_quantization_mnist.py | 22 +- ..._post_training_quantization_mobilenetv1.py | 4 +- .../test_post_training_quantization_while.py | 12 +- .../test_quant_post_quant_aware.py | 12 +- .../test_weight_quantization_mobilenetv1.py | 4 +- test/rnn/rnn_numpy.py | 6 +- test/tokenizer/bert_tokenizer.py | 6 +- test/tokenizer/tokenizer_utils.py | 8 +- test/xpu/test_generate_proposals_v2_op_xpu.py | 4 +- test/xpu/test_tril_triu_op_xpu.py | 8 +- tools/analysisPyXml.py | 12 +- tools/check_op_benchmark_result.py | 2 +- tools/check_op_desc.py | 46 +-- tools/count_api_without_core_ops.py | 8 +- tools/coverage/gcda_clean.py | 6 +- tools/coverage/python_coverage.py | 12 +- tools/externalError/spider.py | 5 +- tools/get_single_test_cov.py | 12 +- tools/parse_kernel_info.py | 2 +- tools/print_signatures.py | 8 +- tools/sampcd_processor_utils.py | 12 +- 316 files changed, 1038 insertions(+), 2770 deletions(-) diff --git a/paddle/phi/api/yaml/generator/api_gen.py b/paddle/phi/api/yaml/generator/api_gen.py index 0c47c23276822..fcfcd17922759 100644 --- a/paddle/phi/api/yaml/generator/api_gen.py +++ b/paddle/phi/api/yaml/generator/api_gen.py @@ -305,9 +305,7 @@ def gene_output( ) else: raise ValueError( - "{} : Output error: the output should not be empty.".format( - self.api - ) + f"{self.api} : Output error: the output should not be empty." ) return kernel_output, output_names, output_create diff --git a/paddle/phi/api/yaml/generator/backward_api_gen.py b/paddle/phi/api/yaml/generator/backward_api_gen.py index 9347552dbb134..541a653f3473b 100644 --- a/paddle/phi/api/yaml/generator/backward_api_gen.py +++ b/paddle/phi/api/yaml/generator/backward_api_gen.py @@ -237,9 +237,7 @@ def gene_output( else: raise ValueError( - "{} : Output error: the output should not be empty.".format( - self.api - ) + f"{self.api} : Output error: the output should not be empty." ) return kernel_output, output_names, output_create diff --git a/paddle/phi/api/yaml/generator/dist_api_gen.py b/paddle/phi/api/yaml/generator/dist_api_gen.py index c9885dec64c97..00189d880e67f 100644 --- a/paddle/phi/api/yaml/generator/dist_api_gen.py +++ b/paddle/phi/api/yaml/generator/dist_api_gen.py @@ -595,9 +595,7 @@ def generate_output_creation_code(self) -> str: ) else: raise ValueError( - "{} : Output error: the output should not be empty.".format( - self.api - ) + f"{self.api} : Output error: the output should not be empty." ) return output_creation_code @@ -1073,9 +1071,7 @@ def generate_reshard_partial_out_to_replicated_code(self) -> str: self.vector_output_size_assertion_check() else: raise ValueError( - "{} : Output error: the output should not be empty.".format( - self.api - ) + f"{self.api} : Output error: the output should not be empty." ) else: reshard_p2r_code = ( diff --git a/paddle/phi/api/yaml/generator/dist_bw_api_gen.py b/paddle/phi/api/yaml/generator/dist_bw_api_gen.py index e5c89a2ad54e4..b29e186f06d38 100644 --- a/paddle/phi/api/yaml/generator/dist_bw_api_gen.py +++ b/paddle/phi/api/yaml/generator/dist_bw_api_gen.py @@ -173,9 +173,7 @@ def generate_output_creation_code(self) -> str: self.vector_output_size_assertion_check() else: raise ValueError( - "{} : Output error: the output should not be empty.".format( - self.api - ) + f"{self.api} : Output error: the output should not be empty." 
) return output_creation_code @@ -249,9 +247,7 @@ def generate_reshard_output_code(self): self.vector_output_size_assertion_check() else: raise ValueError( - "{} : Output error: the output should not be empty.".format( - self.api - ) + f"{self.api} : Output error: the output should not be empty." ) else: # do nothing diff --git a/paddle/phi/api/yaml/generator/sparse_api_gen.py b/paddle/phi/api/yaml/generator/sparse_api_gen.py index 9a017725d6888..172f6703c25bf 100644 --- a/paddle/phi/api/yaml/generator/sparse_api_gen.py +++ b/paddle/phi/api/yaml/generator/sparse_api_gen.py @@ -88,9 +88,7 @@ def gene_output( else: raise ValueError( - "{} : Output error: the output should not be empty.".format( - self.api - ) + f"{self.api} : Output error: the output should not be empty." ) return kernel_output, output_names, output_create diff --git a/paddle/phi/api/yaml/generator/sparse_bw_api_gen.py b/paddle/phi/api/yaml/generator/sparse_bw_api_gen.py index 064cf07d0dbf7..79f335e8c6050 100644 --- a/paddle/phi/api/yaml/generator/sparse_bw_api_gen.py +++ b/paddle/phi/api/yaml/generator/sparse_bw_api_gen.py @@ -98,9 +98,7 @@ def gene_output( else: raise ValueError( - "{} : Output error: the output should not be empty.".format( - self.api - ) + f"{self.api} : Output error: the output should not be empty." ) return kernel_output, output_names, output_create diff --git a/paddle/phi/api/yaml/generator/strings_api_gen.py b/paddle/phi/api/yaml/generator/strings_api_gen.py index 4e66bd5f2fdc6..9948de2b40a53 100644 --- a/paddle/phi/api/yaml/generator/strings_api_gen.py +++ b/paddle/phi/api/yaml/generator/strings_api_gen.py @@ -107,9 +107,7 @@ def gene_output( else: raise ValueError( - "{} : Output error: the output should not be empty.".format( - self.api - ) + f"{self.api} : Output error: the output should not be empty." 
) return kernel_output, output_names, output_create diff --git a/paddle/phi/kernels/fusion/cutlass/memory_efficient_attention/generate_kernels.py b/paddle/phi/kernels/fusion/cutlass/memory_efficient_attention/generate_kernels.py index 26617ec900534..cbe4571c5d010 100644 --- a/paddle/phi/kernels/fusion/cutlass/memory_efficient_attention/generate_kernels.py +++ b/paddle/phi/kernels/fusion/cutlass/memory_efficient_attention/generate_kernels.py @@ -445,13 +445,13 @@ def write_decl_impl( def write_main_header(forward_impl, backward_impl): - main_header_content = ''' + main_header_content = f''' #pragma once -#ifdef {} +#ifdef {ENABLE_MACRO} -#include "{}" -#include "{}" +#include "{forward_impl}" +#include "{backward_impl}" #include "paddle/phi/common/data_type.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/backends/gpu/gpu_context.h" @@ -528,11 +528,7 @@ def write_main_header(forward_impl, backward_impl): #include "./cutlass_backward.h" #endif -'''.format( - ENABLE_MACRO, - forward_impl, - backward_impl, - ) +''' path = Path(args.dst_path) / "autogen" os.makedirs(path, exist_ok=True) diff --git a/pyproject.toml b/pyproject.toml index eca2770cb1b4d..9b247f4a738a9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -122,17 +122,6 @@ ignore = [ "PLC0414", ] -# UP032 - -"python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py" = ["UP032"] -"python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py" = ["UP032"] -"python/paddle/distributed/fleet/utils/pp_parallel_adaptor.py" = ["UP032"] -"python/paddle/incubate/optimizer/gradient_merge.py" = ["UP032"] -"python/paddle/nn/functional/loss.py" = ["UP032"] -"python/paddle/hapi/dynamic_flops.py" = [ "UP032"] -"python/paddle/incubate/optimizer/pipeline.py" = ["UP032"] - - # B017 "test/auto_parallel/spmd_rules/test_reshape_rule.py" = ["B017"] "test/dygraph_to_static/test_assert.py" = ["B017"] @@ -143,316 +132,3 @@ ignore = [ "test/legacy_test/test_eigvals_op.py" = ["B017"] "test/legacy_test/test_tensordot.py" = ["B017"] "test/legacy_test/test_top_k_v2_op.py" = ["B017"] - -# UP032 -"paddle/fluid/ir/dialect/op_generator/api_gen.py" = ["UP032"] -"paddle/fluid/ir/dialect/op_generator/op_gen.py" = ["UP032"] -"paddle/phi/api/yaml/generator/api_gen.py" = ["UP032"] -"paddle/phi/api/yaml/generator/backward_api_gen.py" = ["UP032"] -"paddle/phi/api/yaml/generator/dist_api_gen.py" = ["UP032"] -"paddle/phi/api/yaml/generator/dist_bw_api_gen.py" = ["UP032"] -"paddle/phi/api/yaml/generator/sparse_api_gen.py" = ["UP032"] -"paddle/phi/api/yaml/generator/sparse_bw_api_gen.py" = ["UP032"] -"paddle/phi/api/yaml/generator/strings_api_gen.py" = ["UP032"] -"paddle/phi/kernels/fusion/cutlass/memory_efficient_attention/generate_kernels.py" = ["UP032"] -"parse_build_time.py" = ["UP032"] -"python/paddle/amp/accuracy_compare.py" = ["UP032"] -"python/paddle/audio/backends/init_backend.py" = ["UP032"] -"python/paddle/audio/backends/wave_backend.py" = ["UP032"] -"python/paddle/batch.py" = ["UP032"] -"python/paddle/dataset/common.py" = ["UP032"] -"python/paddle/device/cuda/__init__.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/interface.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/completion.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/converter.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/cost/base_cost.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/cost/tensor_cost.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/cost_model.py" = ["UP032"] 
-"python/paddle/distributed/auto_parallel/static/dist_context.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/dist_op.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/dist_tensor.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/engine.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/graph.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/operators/dist_check_finite_and_unscale.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/operators/dist_default.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/operators/dist_embedding.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/operators/dist_matmul.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/operators/dist_pnorm.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/operators/dist_reduce_sum_p.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/operators/dist_reshape.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/operators/dist_update_loss_scaling.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/partitioner.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/reshard.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/tuner/algorithms.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/tuner/optimization_tuner.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/tuner/recorder.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/tuner/rule_based_tuner.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/tuner/tunable_variable.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/static/utils.py" = ["UP032"] -"python/paddle/distributed/auto_parallel/strategy.py" = ["UP032"] -"python/paddle/distributed/cloud_utils.py" = ["UP032"] -"python/paddle/distributed/communication/group.py" = ["UP032"] -"python/paddle/distributed/communication/stream/gather.py" = ["UP032"] -"python/paddle/distributed/fleet/base/orthogonal_strategy.py" = ["UP032"] -"python/paddle/distributed/fleet/base/role_maker.py" = ["UP032"] -"python/paddle/distributed/fleet/base/strategy_group.py" = ["UP032"] -"python/paddle/distributed/fleet/base/util_factory.py" = ["UP032"] -"python/paddle/distributed/fleet/cloud_utils.py" = ["UP032"] -"python/paddle/distributed/fleet/elastic/manager.py" = ["UP032"] -"python/paddle/distributed/fleet/launch.py" = ["UP032"] -"python/paddle/distributed/fleet/launch_utils.py" = ["UP032"] -"python/paddle/distributed/fleet/layers/mpu/mp_layers.py" = ["UP032"] -"python/paddle/distributed/fleet/layers/mpu/mp_ops.py" = ["UP032"] -"python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/dygraph_sharding_optimizer.py" = ["UP032"] -"python/paddle/distributed/fleet/meta_optimizers/meta_optimizer_base.py" = ["UP032"] -"python/paddle/distributed/fleet/meta_optimizers/sharding/fp16_helper.py" = ["UP032"] -"python/paddle/distributed/fleet/meta_optimizers/sharding/weight_decay_helper.py" = ["UP032"] -"python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py" = ["UP032"] -"python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py" = ["UP032"] -"python/paddle/distributed/fleet/recompute/recompute_hybrid.py" = ["UP032"] -"python/paddle/distributed/fleet/runtime/parameter_server_runtime.py" = ["UP032"] -"python/paddle/distributed/fleet/runtime/the_one_ps.py" = ["UP032"] -"python/paddle/distributed/fleet/utils/fs.py" = ["UP032"] 
-"python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py" = ["UP032"] -"python/paddle/distributed/fleet/utils/sequence_parallel_utils.py" = ["UP032"] -"python/paddle/distributed/launch/context/device.py" = ["UP032"] -"python/paddle/distributed/launch/controllers/collective.py" = ["UP032"] -"python/paddle/distributed/launch/job/pod.py" = ["UP032"] -"python/paddle/distributed/launch/plugins/__init__.py" = ["UP032"] -"python/paddle/distributed/parallel.py" = ["UP032"] -"python/paddle/distributed/passes/auto_parallel_amp.py" = ["UP032"] -"python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py" = ["UP032"] -"python/paddle/distributed/passes/auto_parallel_fp16.py" = ["UP032"] -"python/paddle/distributed/passes/auto_parallel_grad_clip.py" = ["UP032"] -"python/paddle/distributed/passes/auto_parallel_pipeline.py" = ["UP032"] -"python/paddle/distributed/passes/auto_parallel_sharding.py" = ["UP032"] -"python/paddle/distributed/passes/pipeline_scheduler_pass.py" = ["UP032"] -"python/paddle/distributed/ps/coordinator.py" = ["UP032"] -"python/paddle/distributed/ps/the_one_ps.py" = ["UP032"] -"python/paddle/distributed/ps/utils/public.py" = ["UP032"] -"python/paddle/distributed/utils/launch_utils.py" = ["UP032"] -"python/paddle/distributed/utils/nccl_utils.py" = ["UP032"] -"python/paddle/fft.py" = ["UP032"] -"python/paddle/framework/io.py" = ["UP032"] -"python/paddle/framework/random.py" = ["UP032"] -"python/paddle/hapi/callbacks.py" = ["UP032"] -"python/paddle/hapi/hub.py" = ["UP032"] -"python/paddle/hapi/model_summary.py" = ["UP032"] -"python/paddle/hapi/progressbar.py" = ["UP032"] -"python/paddle/incubate/asp/asp.py" = ["UP032"] -"python/paddle/incubate/asp/supported_layer_list.py" = ["UP032"] -"python/paddle/incubate/asp/utils.py" = ["UP032"] -"python/paddle/incubate/distributed/fleet/fleet_util.py" = ["UP032"] -"python/paddle/incubate/distributed/fleet/parameter_server/distribute_transpiler/distributed_strategy.py" = ["UP032"] -"python/paddle/incubate/distributed/fleet/parameter_server/ir/trainer_pass.py" = ["UP032"] -"python/paddle/incubate/distributed/fleet/utils.py" = ["UP032"] -"python/paddle/incubate/nn/layer/fused_dropout_nd.py" = ["UP032"] -"python/paddle/incubate/nn/layer/fused_transformer.py" = ["UP032"] -"python/paddle/incubate/optimizer/functional/bfgs.py" = ["UP032"] -"python/paddle/incubate/optimizer/functional/lbfgs.py" = ["UP032"] -"python/paddle/incubate/optimizer/recompute.py" = ["UP032"] -"python/paddle/incubate/passes/ir.py" = ["UP032"] -"python/paddle/io/dataloader/batch_sampler.py" = ["UP032"] -"python/paddle/io/dataloader/collate.py" = ["UP032"] -"python/paddle/io/dataloader/dataloader_iter.py" = ["UP032"] -"python/paddle/io/dataloader/flat.py" = ["UP032"] -"python/paddle/io/dataloader/sampler.py" = ["UP032"] -"python/paddle/io/dataloader/worker.py" = ["UP032"] -"python/paddle/io/reader.py" = ["UP032"] -"python/paddle/jit/api.py" = ["UP032"] -"python/paddle/jit/dy2static/base_transformer.py" = ["UP032"] -"python/paddle/jit/dy2static/basic_api_transformer.py" = ["UP032"] -"python/paddle/jit/dy2static/convert_operators.py" = ["UP032"] -"python/paddle/jit/dy2static/decorator_transformer.py" = ["UP032"] -"python/paddle/jit/dy2static/error.py" = ["UP032"] -"python/paddle/jit/dy2static/function_spec.py" = ["UP032"] -"python/paddle/jit/dy2static/logical_transformer.py" = ["UP032"] -"python/paddle/jit/dy2static/origin_info.py" = ["UP032"] -"python/paddle/jit/dy2static/utils.py" = ["UP032"] -"python/paddle/jit/dy2static/variable_trans_func.py" = 
["UP032"] -"python/paddle/metric/metrics.py" = ["UP032"] -"python/paddle/nn/functional/activation.py" = ["UP032"] -"python/paddle/nn/functional/common.py" = ["UP032"] -"python/paddle/nn/functional/conv.py" = ["UP032"] -"python/paddle/nn/functional/extension.py" = ["UP032"] -"python/paddle/nn/functional/input.py" = ["UP032"] -"python/paddle/nn/functional/norm.py" = ["UP032"] -"python/paddle/nn/functional/pooling.py" = ["UP032"] -"python/paddle/nn/functional/vision.py" = ["UP032"] -"python/paddle/nn/initializer/initializer.py" = ["UP032"] -"python/paddle/nn/layer/activation.py" = ["UP032"] -"python/paddle/nn/layer/common.py" = ["UP032"] -"python/paddle/nn/layer/container.py" = ["UP032"] -"python/paddle/nn/layer/layers.py" = ["UP032"] -"python/paddle/nn/layer/loss.py" = ["UP032"] -"python/paddle/nn/layer/norm.py" = ["UP032"] -"python/paddle/nn/layer/pooling.py" = ["UP032"] -"python/paddle/nn/layer/rnn.py" = ["UP032"] -"python/paddle/nn/layer/transformer.py" = ["UP032"] -"python/paddle/nn/layer/vision.py" = ["UP032"] -"python/paddle/nn/utils/spectral_norm_hook.py" = ["UP032"] -"python/paddle/nn/utils/weight_norm_hook.py" = ["UP032"] -"python/paddle/onnx/export.py" = ["UP032"] -"python/paddle/optimizer/lr.py" = ["UP032"] -"python/paddle/optimizer/optimizer.py" = ["UP032"] -"python/paddle/profiler/profiler.py" = ["UP032"] -"python/paddle/profiler/profiler_statistic.py" = ["UP032"] -"python/paddle/profiler/timer.py" = ["UP032"] -"python/paddle/signal.py" = ["UP032"] -"python/paddle/sparse/creation.py" = ["UP032"] -"python/paddle/sparse/nn/functional/conv.py" = ["UP032"] -"python/paddle/sparse/unary.py" = ["UP032"] -"python/paddle/static/amp/bf16/amp_utils.py" = ["UP032"] -"python/paddle/static/amp/fp16_utils.py" = ["UP032"] -"python/paddle/static/io.py" = ["UP032"] -"python/paddle/static/nn/common.py" = ["UP032"] -"python/paddle/static/nn/control_flow.py" = ["UP032"] -"python/paddle/static/quantization/quantization_pass.py" = ["UP032"] -"python/paddle/tensor/linalg.py" = ["UP032"] -"python/paddle/tensor/manipulation.py" = ["UP032"] -"python/paddle/tensor/math.py" = ["UP032"] -"python/paddle/tensor/to_string.py" = ["UP032"] -"python/paddle/utils/cpp_extension/cpp_extension.py" = ["UP032"] -"python/paddle/utils/cpp_extension/extension_utils.py" = ["UP032"] -"python/paddle/utils/deprecated.py" = ["UP032"] -"python/paddle/utils/dlpack.py" = ["UP032"] -"python/paddle/utils/download.py" = ["UP032"] -"python/paddle/utils/install_check.py" = ["UP032"] -"python/paddle/utils/layers_utils.py" = ["UP032"] -"python/paddle/vision/datasets/cifar.py" = ["UP032"] -"python/paddle/vision/datasets/flowers.py" = ["UP032"] -"python/paddle/vision/datasets/mnist.py" = ["UP032"] -"python/paddle/vision/datasets/voc2012.py" = ["UP032"] -"python/paddle/vision/image.py" = ["UP032"] -"python/paddle/vision/models/densenet.py" = ["UP032"] -"python/paddle/vision/models/mobilenetv3.py" = ["UP032"] -"python/paddle/vision/models/squeezenet.py" = ["UP032"] -"python/paddle/vision/transforms/functional_tensor.py" = ["UP032"] -"python/paddle/vision/transforms/transforms.py" = ["UP032"] -"test/book/test_word2vec_book.py" = ["UP032"] -"test/cinn/op_mappers/op_mapper_test.py" = ["UP032"] -"test/cinn/passes/pass_test.py" = ["UP032"] -"test/cinn/test_paddle_model_convertor.py" = ["UP032"] -"test/collective/fleet/parallel_dygraph_se_resnext.py" = ["UP032"] -"test/collective/fleet/test_parallel_dygraph_pp_adaptor.py" = ["UP032"] -"test/contrib/test_multi_precision_fp16_train.py" = ["UP032"] 
-"test/cpp/inference/api/full_ILSVRC2012_val_preprocess.py" = ["UP032"] -"test/cpp_extension/test_cpp_extension_setup.py" = ["UP032"] -"test/cpp_extension/test_mixed_extension_setup.py" = ["UP032"] -"test/cpp_extension/utils.py" = ["UP032"] -"test/custom_kernel/test_custom_kernel_dot.py" = ["UP032"] -"test/custom_op/test_context_pool.py" = ["UP032"] -"test/custom_op/test_custom_attrs_jit.py" = ["UP032"] -"test/custom_op/test_custom_cast_op_jit.py" = ["UP032"] -"test/custom_op/test_custom_concat.py" = ["UP032"] -"test/custom_op/test_custom_relu_op_jit.py" = ["UP032"] -"test/custom_op/test_custom_relu_op_setup.py" = ["UP032"] -"test/custom_op/test_custom_relu_op_xpu_setup.py" = ["UP032"] -"test/custom_op/test_custom_simple_slice.py" = ["UP032"] -"test/custom_op/test_custom_tensor_operator.py" = ["UP032"] -"test/custom_op/utils.py" = ["UP032"] -"test/custom_runtime/test_collective_process_group_xccl.py" = ["UP032"] -"test/custom_runtime/test_custom_cpu_plugin.py" = ["UP032"] -"test/custom_runtime/test_custom_cpu_profiler_plugin.py" = ["UP032"] -"test/custom_runtime/test_custom_cpu_to_static.py" = ["UP032"] -"test/custom_runtime/test_custom_op_setup.py" = ["UP032"] -"test/distributed_passes/dist_pass_test_base.py" = ["UP032"] -"test/dygraph_to_static/test_break_continue.py" = ["UP032"] -"test/dygraph_to_static/test_build_strategy.py" = ["UP032"] -"test/dygraph_to_static/test_cache_program.py" = ["UP032"] -"test/dygraph_to_static/test_cast.py" = ["UP032"] -"test/dygraph_to_static/test_container.py" = ["UP032"] -"test/dygraph_to_static/test_convert_call.py" = ["UP032"] -"test/dygraph_to_static/test_dict.py" = ["UP032"] -"test/dygraph_to_static/test_error.py" = ["UP032"] -"test/dygraph_to_static/test_fetch_feed.py" = ["UP032"] -"test/dygraph_to_static/test_lac.py" = ["UP032"] -"test/dygraph_to_static/test_layer_hook.py" = ["UP032"] -"test/dygraph_to_static/test_list.py" = ["UP032"] -"test/dygraph_to_static/test_logical.py" = ["UP032"] -"test/dygraph_to_static/test_lstm.py" = ["UP032"] -"test/dygraph_to_static/test_mnist.py" = ["UP032"] -"test/dygraph_to_static/test_mnist_amp.py" = ["UP032"] -"test/dygraph_to_static/test_mnist_pure_fp16.py" = ["UP032"] -"test/dygraph_to_static/test_mobile_net.py" = ["UP032"] -"test/dygraph_to_static/test_resnet.py" = ["UP032"] -"test/dygraph_to_static/test_resnet_amp.py" = ["UP032"] -"test/dygraph_to_static/test_resnet_pure_fp16.py" = ["UP032"] -"test/dygraph_to_static/test_resnet_v2.py" = ["UP032"] -"test/dygraph_to_static/test_se_resnet.py" = ["UP032"] -"test/dygraph_to_static/test_seq2seq.py" = ["UP032"] -"test/dygraph_to_static/yolov3.py" = ["UP032"] -"test/fft/spectral_op_np.py" = ["UP032"] -"test/ir/inference/test_trt_convert_multiclass_nms.py" = ["UP032"] -"test/ir/inference/test_trt_convert_multiclass_nms3.py" = ["UP032"] -"test/ir/inference/test_trt_pool3d_op.py" = ["UP032"] -"test/ir/inference/test_trt_pool_op.py" = ["UP032"] -"test/legacy_test/auto_parallel_autoconvert.py" = ["UP032"] -"test/legacy_test/benchmark.py" = ["UP032"] -"test/legacy_test/dist_fleet_ctr.py" = ["UP032"] -"test/legacy_test/dist_fleet_ctr_ps_gpu.py" = ["UP032"] -"test/legacy_test/dist_fleet_simnet_bow.py" = ["UP032"] -"test/legacy_test/dist_fleet_sparse_embedding_ctr.py" = ["UP032"] -"test/legacy_test/dist_fleet_sync_batch_norm.py" = ["UP032"] -"test/legacy_test/dist_se_resnext.py" = ["UP032"] -"test/legacy_test/eager_op_test.py" = ["UP032"] -"test/legacy_test/fleet_meta_optimizer_base.py" = ["UP032"] -"test/legacy_test/gradient_checker.py" = ["UP032"] 
-"test/legacy_test/test_chunk_eval_op.py" = ["UP032"] -"test/legacy_test/test_detach.py" = ["UP032"] -"test/legacy_test/test_dist_base.py" = ["UP032"] -"test/legacy_test/test_dist_fleet_base.py" = ["UP032"] -"test/legacy_test/test_eager_deletion_delete_vars.py" = ["UP032"] -"test/legacy_test/test_fused_dropout_add_op.py" = ["UP032"] -"test/legacy_test/test_generate_proposals_op.py" = ["UP032"] -"test/legacy_test/test_generator_dataloader.py" = ["UP032"] -"test/legacy_test/test_imperative_resnet.py" = ["UP032"] -"test/legacy_test/test_imperative_se_resnext.py" = ["UP032"] -"test/legacy_test/test_inplace.py" = ["UP032"] -"test/legacy_test/test_layers.py" = ["UP032"] -"test/legacy_test/test_lstm_cudnn_op.py" = ["UP032"] -"test/legacy_test/test_multi_dot_op.py" = ["UP032"] -"test/legacy_test/test_multiprocess_dataloader_iterable_dataset_static.py" = ["UP032"] -"test/legacy_test/test_multiprocess_dataloader_static.py" = ["UP032"] -"test/legacy_test/test_ops_nms.py" = ["UP032"] -"test/legacy_test/test_pylayer_op.py" = ["UP032"] -"test/legacy_test/test_run.py" = ["UP032"] -"test/legacy_test/test_sample_logits_op.py" = ["UP032"] -"test/legacy_test/test_signal.py" = ["UP032"] -"test/legacy_test/test_static_save_load.py" = ["UP032"] -"test/legacy_test/test_sync_batch_norm_op.py" = ["UP032"] -"test/legacy_test/test_translated_layer.py" = ["UP032"] -"test/legacy_test/test_tril_triu_op.py" = ["UP032"] -"test/legacy_test/test_variable.py" = ["UP032"] -"test/legacy_test/test_view_op_reuse_allocation.py" = ["UP032"] -"test/mkldnn/test_onnx_format_quantization_mobilenetv1.py" = ["UP032"] -"test/ps/static_gpubox_trainer.py" = ["UP032"] -"test/quantization/quant2_int8_image_classification_comparison.py" = ["UP032"] -"test/quantization/quant2_int8_lstm_model.py" = ["UP032"] -"test/quantization/quant_int8_image_classification_comparison.py" = ["UP032"] -"test/quantization/test_imperative_ptq.py" = ["UP032"] -"test/quantization/test_imperative_qat_amp.py" = ["UP032"] -"test/quantization/test_post_training_quantization_lstm_model.py" = ["UP032"] -"test/quantization/test_post_training_quantization_mnist.py" = ["UP032"] -"test/quantization/test_post_training_quantization_mobilenetv1.py" = ["UP032"] -"test/quantization/test_post_training_quantization_while.py" = ["UP032"] -"test/quantization/test_quant_post_quant_aware.py" = ["UP032"] -"test/quantization/test_weight_quantization_mobilenetv1.py" = ["UP032"] -"test/rnn/rnn_numpy.py" = ["UP032"] -"test/tokenizer/bert_tokenizer.py" = ["UP032"] -"test/tokenizer/tokenizer_utils.py" = ["UP032"] -"test/xpu/test_generate_proposals_v2_op_xpu.py" = ["UP032"] -"test/xpu/test_tril_triu_op_xpu.py" = ["UP032"] -"tools/analysisPyXml.py" = ["UP032"] -"tools/check_op_benchmark_result.py" = ["UP032"] -"tools/check_op_desc.py" = ["UP032"] -"tools/count_api_without_core_ops.py" = ["UP032"] -"tools/coverage/gcda_clean.py" = ["UP032"] -"tools/coverage/python_coverage.py" = ["UP032"] -"tools/externalError/spider.py" = ["UP032"] -"tools/get_single_test_cov.py" = ["UP032"] -"tools/parse_kernel_info.py" = ["UP032"] -"tools/print_signatures.py" = ["UP032"] -"tools/sampcd_processor_utils.py" = ["UP032"] -"test/custom_kernel/test_custom_kernel_load.py" = ["UP032"] -"python/paddle/distributed/fleet/utils/tensor_parallel_utils.py" = ["UP032"] diff --git a/python/paddle/amp/accuracy_compare.py b/python/paddle/amp/accuracy_compare.py index 761f088c99f8f..52a4c4c2ef85d 100644 --- a/python/paddle/amp/accuracy_compare.py +++ b/python/paddle/amp/accuracy_compare.py @@ -705,35 +705,23 @@ def 
compare_accuracy( ) for filename in sorted(workerlog_filenames): - print( - "-- [Step 1/4] Parsing FP32 logs under {}/{}".format( - dump_path, filename - ) - ) + print(f"-- [Step 1/4] Parsing FP32 logs under {dump_path}/{filename}") fp32_tensor_info_list, fp32_has_tensor_name = parse_log( dump_path, filename, None ) print( - "-- [Step 2/4] Parsing FP16 logs under {}/{}".format( - another_dump_path, filename - ) + f"-- [Step 2/4] Parsing FP16 logs under {another_dump_path}/{filename}" ) fp16_tensor_info_list, fp16_has_tensor_name = parse_log( another_dump_path, filename, None ) - print( - "-- [Step 3/4] Merge FP32 and FP16 tensor info for {}".format( - filename - ) - ) + print(f"-- [Step 3/4] Merge FP32 and FP16 tensor info for {filename}") mp_tensor_info_list = merge_tensor_info_list( fp32_tensor_info_list, fp16_tensor_info_list, grad_scale ) print( - "-- [Step 4/4] Add worksheet for mixed precision tensor info of {}".format( - filename - ) + f"-- [Step 4/4] Add worksheet for mixed precision tensor info of {filename}" ) excel_writer.add_worksheet( mp_tensor_info_list, diff --git a/python/paddle/audio/backends/init_backend.py b/python/paddle/audio/backends/init_backend.py index e9793bcd9736c..12e3a0d84c9e3 100644 --- a/python/paddle/audio/backends/init_backend.py +++ b/python/paddle/audio/backends/init_backend.py @@ -83,9 +83,9 @@ def list_available_backends() -> List[str]: version = paddleaudio.__version__ if not _check_version(version): err_msg = ( - "the version of paddleaudio installed is {},\n" + f"the version of paddleaudio installed is {version},\n" "please ensure the paddleaudio >= 1.0.2." - ).format(version) + ) raise ImportError(err_msg) backends = paddleaudio.backends.list_audio_backends() backends.append("wave_backend") diff --git a/python/paddle/audio/backends/wave_backend.py b/python/paddle/audio/backends/wave_backend.py index 1dcd48e1917af..262ccafeb304a 100644 --- a/python/paddle/audio/backends/wave_backend.py +++ b/python/paddle/audio/backends/wave_backend.py @@ -28,9 +28,9 @@ def _error_message(): warn_msg = ( "only PCM16 WAV supportted. \n" "if want support more other audio types, please " - "manually installed (usually with `pip install {}`). \n " + f"manually installed (usually with `pip install {package}`). 
\n " "and use paddle.audio.backends.set_backend('soundfile') to set audio backend" - ).format(package) + ) return warn_msg diff --git a/python/paddle/batch.py b/python/paddle/batch.py index 788e413fa96c9..98e5a6a14545a 100644 --- a/python/paddle/batch.py +++ b/python/paddle/batch.py @@ -67,7 +67,7 @@ def batch_reader(): if batch_size <= 0: raise ValueError( "batch_size should be a positive integer value, " - "but got batch_size={}".format(batch_size) + f"but got batch_size={batch_size}" ) return batch_reader diff --git a/python/paddle/dataset/common.py b/python/paddle/dataset/common.py index 6b51b27c66712..4695b633ffa0f 100644 --- a/python/paddle/dataset/common.py +++ b/python/paddle/dataset/common.py @@ -91,9 +91,7 @@ def download(url, module_name, md5sum, save_name=None): retry += 1 else: raise RuntimeError( - "Cannot download {} within retry limit {}".format( - url, retry_limit - ) + f"Cannot download {url} within retry limit {retry_limit}" ) sys.stderr.write( f"Cache file {filename} not found, downloading {url} \n" diff --git a/python/paddle/device/cuda/__init__.py b/python/paddle/device/cuda/__init__.py index ba4b4eedba12a..cb57e674e2017 100644 --- a/python/paddle/device/cuda/__init__.py +++ b/python/paddle/device/cuda/__init__.py @@ -476,9 +476,9 @@ def get_device_properties(device=None): ) else: raise ValueError( - "The device type {} is not expected. Because paddle.device.cuda." + f"The device type {device} is not expected. Because paddle.device.cuda." "get_device_properties only support int, str or paddle.CUDAPlace. " - "Please input appropriate device again!".format(device) + "Please input appropriate device again!" ) else: device_id = -1 diff --git a/python/paddle/distributed/auto_parallel/interface.py b/python/paddle/distributed/auto_parallel/interface.py index ba8003dcd86fd..c8ab91a7346f0 100644 --- a/python/paddle/distributed/auto_parallel/interface.py +++ b/python/paddle/distributed/auto_parallel/interface.py @@ -70,9 +70,7 @@ def shard_tensor(x, process_mesh=None, shard_spec=None): if process_mesh is not None: assert isinstance( process_mesh, core.ProcessMesh - ), "Argument process_mesh {} is not an instance of ProcessMesh".format( - process_mesh - ) + ), f"Argument process_mesh {process_mesh} is not an instance of ProcessMesh" else: process_mesh = get_current_process_mesh() assert ( @@ -163,9 +161,7 @@ def shard_op(op, process_mesh=None, in_shard_specs=None, out_shard_specs=None): if process_mesh is not None: assert isinstance( process_mesh, ProcessMesh - ), "Argument process_mesh {} is not an instance of ProcessMesh".format( - process_mesh - ) + ), f"Argument process_mesh {process_mesh} is not an instance of ProcessMesh" else: process_mesh = get_current_process_mesh() assert ( @@ -176,9 +172,7 @@ def shard_op(op, process_mesh=None, in_shard_specs=None, out_shard_specs=None): assert all( (isinstance(shard_spec, list) or shard_spec is None) for shard_spec in in_shard_specs - ), "in_shard_spec {} is not a list of list or None".format( - in_shard_specs - ) + ), f"in_shard_spec {in_shard_specs} is not a list of list or None" for shard_spec in in_shard_specs: if shard_spec is not None: in_dims_mappings.append( @@ -191,9 +185,7 @@ def shard_op(op, process_mesh=None, in_shard_specs=None, out_shard_specs=None): assert all( (isinstance(shard_spec, list) or shard_spec is None) for shard_spec in out_shard_specs - ), "out_shard_spec {} is not a list of list or None".format( - out_shard_specs - ) + ), f"out_shard_spec {out_shard_specs} is not a list of list or None" for shard_spec in 
out_shard_specs: if shard_spec is not None: out_dims_mappings.append( diff --git a/python/paddle/distributed/auto_parallel/static/completion.py b/python/paddle/distributed/auto_parallel/static/completion.py index fc7646fa3b5f3..baf183f55bbae 100644 --- a/python/paddle/distributed/auto_parallel/static/completion.py +++ b/python/paddle/distributed/auto_parallel/static/completion.py @@ -1211,9 +1211,7 @@ def _get_op_by_id(ops, id): output_name = grad_op.output_arg_names[0] assert ( output_name in grad_var_to_var[appended_grad_times] - ), "sum op's output '{}' has no corresponding var".format( - output_name - ) + ), f"sum op's output '{output_name}' has no corresponding var" ref_fwd_var_name = grad_var_to_var[appended_grad_times][ output_name ] @@ -1513,9 +1511,7 @@ def _get_op_by_id(ops, id): output_name = grad_op.output_arg_names[0] assert ( output_name in grad_var_to_var - ), "sum op's output '{}' has no corresponding var".format( - output_name - ) + ), f"sum op's output '{output_name}' has no corresponding var" ref_fwd_var_name = grad_var_to_var[output_name] ref_fwd_var = vars[ref_fwd_var_name] ref_fwd_dist_attr = ( diff --git a/python/paddle/distributed/auto_parallel/static/converter.py b/python/paddle/distributed/auto_parallel/static/converter.py index 710dfb43e877b..3c22f14b01a60 100644 --- a/python/paddle/distributed/auto_parallel/static/converter.py +++ b/python/paddle/distributed/auto_parallel/static/converter.py @@ -69,7 +69,7 @@ def _check_pre_strategy(self, pre_strategy): if not isinstance(pre_strategy, dict): raise TypeError( "The type of 'pre_strategy' should be 'dict', " - "but got '{}'.".format(str(type(pre_strategy))) + f"but got '{str(type(pre_strategy))}'." ) return pre_strategy @@ -82,7 +82,7 @@ def _check_cur_strategy(self, cur_strategy): if not isinstance(cur_strategy, dict): raise TypeError( "The type of 'cur_strategy' should be 'dict', " - "but got '{}'.".format(str(type(cur_strategy))) + f"but got '{str(type(cur_strategy))}'." ) return cur_strategy @@ -229,9 +229,7 @@ def convert_with_prefix_match( + str(err) ) self._logger.info( - "tensor [{}] is matched with tensor [{}]".format( - cur_name, pre_name - ) + f"tensor [{cur_name}] is matched with tensor [{pre_name}]" ) tensor_match_with_pre.append(cur_name) tensor_match_with_cur.append(pre_name) @@ -309,9 +307,7 @@ def merge_with_dist_attr(tensor_list, dist_attr): if len(partition_tensor_list) != 1: raise ValueError( - "Fail to merge tensor with dist_attr '{}'.".format( - str(dist_attr) - ) + f"Fail to merge tensor with dist_attr '{str(dist_attr)}'." ) complete_tensor = partition_tensor_list[0][0] return complete_tensor @@ -336,9 +332,7 @@ def slice_with_dist_attr(tensor, dist_attr): ) if sliced_tensor_index not in range(len(sliced_tensor_list)): raise ValueError( - "Fail to slice tensor with dist_attr '{}'.".format( - str(dist_attr) - ) + f"Fail to slice tensor with dist_attr '{str(dist_attr)}'." 
) sliced_tensor = sliced_tensor_list[sliced_tensor_index] return sliced_tensor diff --git a/python/paddle/distributed/auto_parallel/static/cost/base_cost.py b/python/paddle/distributed/auto_parallel/static/cost/base_cost.py index f89a03647cfcc..7b87b31865dc0 100644 --- a/python/paddle/distributed/auto_parallel/static/cost/base_cost.py +++ b/python/paddle/distributed/auto_parallel/static/cost/base_cost.py @@ -846,9 +846,7 @@ def group_ranks(self): process_group = get_process_group(ring_id) if process_group is None: raise ValueError( - "There not exists process group whose ring_id is {}.".format( - ring_id - ) + f"There not exists process group whose ring_id is {ring_id}." ) self._group_ranks = process_group.ranks return self._group_ranks @@ -858,9 +856,7 @@ def _check_comm_op_type(cls): if cls.OP_TYPE != "COMM": if cls.OP_TYPE not in COMM_OP_TYPE: raise TypeError( - "Please Check op type in {}, but got {}.".format( - COMM_OP_TYPE, cls.OP_TYPE - ) + f"Please Check op type in {COMM_OP_TYPE}, but got {cls.OP_TYPE}." ) @@ -931,9 +927,7 @@ def calc_time_by_cost_model(op, cluster=None): """Calc op time by cost model and the unit is microsecond.""" if not isinstance(op, paddle.base.framework.Operator): raise TypeError( - "OP must be paddle.base.framework.Operator, but got {}.".format( - type(op) - ) + f"OP must be paddle.base.framework.Operator, but got {type(op)}." ) if not cluster: cluster = get_default_cluster() diff --git a/python/paddle/distributed/auto_parallel/static/cost/tensor_cost.py b/python/paddle/distributed/auto_parallel/static/cost/tensor_cost.py index 17d3b0476081a..38f7a007ceaa6 100644 --- a/python/paddle/distributed/auto_parallel/static/cost/tensor_cost.py +++ b/python/paddle/distributed/auto_parallel/static/cost/tensor_cost.py @@ -54,9 +54,7 @@ def _check_args(self, tensor, dist_tensor, shape, dtype): if not isinstance(tensor, Variable): raise TypeError( - "Please check tensor type is Variable, but got {}".format( - type(tensor) - ) + f"Please check tensor type is Variable, but got {type(tensor)}" ) elif dist_tensor is not None: @@ -72,9 +70,7 @@ def _check_args(self, tensor, dist_tensor, shape, dtype): assert tensor is None and dist_tensor is None and dtype is not None if not isinstance(shape, (list, set)): raise TypeError( - "Please check shape type is list or set, but got {}".format( - type(shape) - ) + f"Please check shape type is list or set, but got {type(shape)}" ) elif dtype is not None: diff --git a/python/paddle/distributed/auto_parallel/static/cost_model.py b/python/paddle/distributed/auto_parallel/static/cost_model.py index b3e5bce8ef58f..55690e4f3de8f 100644 --- a/python/paddle/distributed/auto_parallel/static/cost_model.py +++ b/python/paddle/distributed/auto_parallel/static/cost_model.py @@ -435,9 +435,7 @@ def _merge_node(self, to_merge_node_list, merge_type='linear', nodes=None): node_cost = max(node_cost, node.cost) else: raise NotImplementedError( - 'This type of merging is not supported:{}'.format( - merge_type - ) + f'This type of merging is not supported:{merge_type}' ) merged_node_id = 'merged_' + str(len(nodes)) is_bwd = to_merge_node_list[0].is_bwd @@ -796,9 +794,7 @@ def _simulate_pipeline(self): global_time[stid] = e.e_time else: raise NotImplementedError( - 'This type of pipe event is not supported yet.{}'.format( - e.name - ) + f'This type of pipe event is not supported yet.{e.name}' ) for t in global_time: diff --git a/python/paddle/distributed/auto_parallel/static/dist_context.py b/python/paddle/distributed/auto_parallel/static/dist_context.py 
index 5eabdd312bbb7..58cf994c2b73b 100644 --- a/python/paddle/distributed/auto_parallel/static/dist_context.py +++ b/python/paddle/distributed/auto_parallel/static/dist_context.py @@ -1209,9 +1209,9 @@ def parse_forward_blocks(self, program): assert self.nblock >= 1 def parse_backward_blocks(self, program): - assert 0 in self.forward_indices, "forward block idx are{}".format( - self.forward_indices - ) + assert ( + 0 in self.forward_indices + ), f"forward block idx are{self.forward_indices}" self.backward_to_forward_index_map[0] = 0 for idx, block in enumerate(program.blocks): diff --git a/python/paddle/distributed/auto_parallel/static/dist_op.py b/python/paddle/distributed/auto_parallel/static/dist_op.py index 8489d3f3332a6..a728b55697bfa 100644 --- a/python/paddle/distributed/auto_parallel/static/dist_op.py +++ b/python/paddle/distributed/auto_parallel/static/dist_op.py @@ -124,8 +124,8 @@ def __str__(self): annotated_str = "annotated" else: annotated_str = "non-annotated" - str += ", process_mesh ({}): {}".format( - annotated_str, self.dist_attr.process_mesh + str += ( + f", process_mesh ({annotated_str}): {self.dist_attr.process_mesh}" ) for arg_name in self.serial_op.desc.input_arg_names(): diff --git a/python/paddle/distributed/auto_parallel/static/dist_tensor.py b/python/paddle/distributed/auto_parallel/static/dist_tensor.py index d44fa513f1a33..32a4f43434118 100644 --- a/python/paddle/distributed/auto_parallel/static/dist_tensor.py +++ b/python/paddle/distributed/auto_parallel/static/dist_tensor.py @@ -399,8 +399,8 @@ def __str__(self): annotated_str = "annotated" else: annotated_str = "non-annotated" - str += ", process_mesh ({}): {}".format( - annotated_str, self.dist_attr.process_mesh + str += ( + f", process_mesh ({annotated_str}): {self.dist_attr.process_mesh}" ) str += f", is_parameter: {self.serial_tensor.is_parameter}" @@ -409,9 +409,7 @@ def __str__(self): annotated_str = "annotated" else: annotated_str = "non-annotated" - str += ", dims_mapping ({}): {} }}".format( - annotated_str, self.dist_attr.dims_mapping - ) + str += f", dims_mapping ({annotated_str}): {self.dist_attr.dims_mapping} }}" # if self.dist_attr.is_annotated("shard_mask"): # annotated_str = "annotated" diff --git a/python/paddle/distributed/auto_parallel/static/engine.py b/python/paddle/distributed/auto_parallel/static/engine.py index c7ade53ebdb24..393d59c8c9a63 100644 --- a/python/paddle/distributed/auto_parallel/static/engine.py +++ b/python/paddle/distributed/auto_parallel/static/engine.py @@ -158,9 +158,7 @@ def __init__( for metric in auto_utils.to_list(metrics): if metric and not isinstance(metric, Metric): raise TypeError( - "{} is not sub class of Metric".format( - metric.__class__.__name__ - ) + f"{metric.__class__.__name__} is not sub class of Metric" ) self._metrics = auto_utils.to_list(metrics) @@ -331,9 +329,7 @@ def _prepare_data_tensor(self, inputs_spec, labels_spec, inputs, labels): if inputs_spec: assert isinstance( inputs_spec, list - ), "inputs should be list, but received {}".format( - type(inputs_spec) - ) + ), f"inputs should be list, but received {type(inputs_spec)}" assert isinstance( inputs, list ), f"inputs should be list, but received {type(inputs)}" @@ -346,9 +342,7 @@ def _prepare_data_tensor(self, inputs_spec, labels_spec, inputs, labels): if labels_spec: assert isinstance( labels_spec, list - ), "labels should be list, but received {}".format( - type(labels_spec) - ) + ), f"labels should be list, but received {type(labels_spec)}" assert isinstance( labels, list ), f"labels 
should be list, but received {type(labels)}" @@ -457,9 +451,7 @@ def _prepare_feed(self, data, user_feeds, mode): if user_feeds is not None: assert isinstance( user_feeds, dict - ), "user_feeds must be a dict, but receive {}".format( - type(user_feeds).__name__ - ) + ), f"user_feeds must be a dict, but receive {type(user_feeds).__name__}" for name, data in user_feeds.items(): feeds[name] = data return feeds @@ -468,9 +460,7 @@ def _prepare_fetch(self, user_fetches, mode): if user_fetches is not None: assert isinstance( user_fetches, list - ), "user_fetches must be a list, but receive {}".format( - type(user_fetches).__name__ - ) + ), f"user_fetches must be a list, but receive {type(user_fetches).__name__}" fetch_names = [] fetch_indices = [] diff --git a/python/paddle/distributed/auto_parallel/static/graph.py b/python/paddle/distributed/auto_parallel/static/graph.py index d4cace82585b3..5665294487e9c 100644 --- a/python/paddle/distributed/auto_parallel/static/graph.py +++ b/python/paddle/distributed/auto_parallel/static/graph.py @@ -84,9 +84,7 @@ def __contains__(self, attr_name): def __str__(self): str = "" - str += "(src_id: {}, tgt_id: {}, attrs: {})".format( - self.src_id, self.tgt_id, self._attrs - ) + str += f"(src_id: {self.src_id}, tgt_id: {self.tgt_id}, attrs: {self._attrs})" return str diff --git a/python/paddle/distributed/auto_parallel/static/operators/dist_check_finite_and_unscale.py b/python/paddle/distributed/auto_parallel/static/operators/dist_check_finite_and_unscale.py index a531b3bb8c660..1c490345ff1c6 100644 --- a/python/paddle/distributed/auto_parallel/static/operators/dist_check_finite_and_unscale.py +++ b/python/paddle/distributed/auto_parallel/static/operators/dist_check_finite_and_unscale.py @@ -85,9 +85,7 @@ def backward(ctx, *args, **kwargs): dist_attr = ctx.get_op_dist_attr_for_program(backward_op) assert ( dist_attr is not None - ), "backward op [{}] don't have dist attribute !".format( - str(backward_op) - ) + ), f"backward op [{str(backward_op)}] don't have dist attribute !" 
assert rank_id in dist_attr.process_mesh.process_ids diff --git a/python/paddle/distributed/auto_parallel/static/operators/dist_default.py b/python/paddle/distributed/auto_parallel/static/operators/dist_default.py index 93833651dd185..04bca9c95ddbe 100644 --- a/python/paddle/distributed/auto_parallel/static/operators/dist_default.py +++ b/python/paddle/distributed/auto_parallel/static/operators/dist_default.py @@ -455,21 +455,15 @@ def forward(ctx, *args, **kwargs): # check validation of inputs / outputs for input_name in src_op.desc.input_names(): - assert input_name in kwargs, "input [{}] is not given".format( - input_name - ) + assert input_name in kwargs, f"input [{input_name}] is not given" assert len(kwargs[input_name]) == len( src_op.desc.input(input_name) ), f"number of tensor for input [{input_name}] is not match" for output_name in src_op.desc.output_names(): - assert output_name in kwargs, "input [{}] is not given".format( - output_name - ) + assert output_name in kwargs, f"input [{output_name}] is not given" assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) - ), "number of tensor for input [{}] is not match".format( - output_name - ) + ), f"number of tensor for input [{output_name}] is not match" # replicate op in dist program dist_op = main_block.append_op(type='nop') @@ -575,28 +569,20 @@ def backward(ctx, *args, **kwargs): dist_attr = ctx.get_op_dist_attr_for_program(backward_op) assert ( dist_attr is not None - ), "backward op [{}] don't have dist attribute !".format( - str(backward_op) - ) + ), f"backward op [{str(backward_op)}] don't have dist attribute !" rank_id = dist_op_context.rank_id # check validation of inputs / outputs for input_name in backward_op.desc.input_names(): - assert input_name in kwargs, "input [{}] is not given".format( - input_name - ) + assert input_name in kwargs, f"input [{input_name}] is not given" assert len(kwargs[input_name]) == len( backward_op.desc.input(input_name) ), f"number of tensor for input [{input_name}] is not match" for output_name in backward_op.desc.output_names(): - assert output_name in kwargs, "input [{}] is not given".format( - output_name - ) + assert output_name in kwargs, f"input [{output_name}] is not given" assert len(kwargs[output_name]) == len( backward_op.desc.output(output_name) - ), "number of tensor for input [{}] is not match".format( - output_name - ) + ), f"number of tensor for input [{output_name}] is not match" # replicate op in dist program dist_op_desc = main_block.append_op(type='nop').desc diff --git a/python/paddle/distributed/auto_parallel/static/operators/dist_embedding.py b/python/paddle/distributed/auto_parallel/static/operators/dist_embedding.py index 4459f9b63f759..42ddfc4b0d4b3 100644 --- a/python/paddle/distributed/auto_parallel/static/operators/dist_embedding.py +++ b/python/paddle/distributed/auto_parallel/static/operators/dist_embedding.py @@ -498,17 +498,13 @@ def forward(ctx, *args, **kwargs): embedding_op_dist_attr.impl_idx = op_dist_attr.impl_idx for input_varname in c_embedding_op.desc.input_arg_names(): input_dist_attr = op_dist_attr.get_input_dist_attr(input_varname) - assert input_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr - ) + assert input_dist_attr is not None, f"dist_attr is {op_dist_attr}" embedding_op_dist_attr.set_input_dist_attr( input_varname, input_dist_attr ) output_varname = c_embedding_op.desc.output_arg_names()[0] output_dist_attr = op_dist_attr.get_output_dist_attr(Out_var.name) - assert output_dist_attr is not None, "dist_attr 
is {}".format( - op_dist_attr - ) + assert output_dist_attr is not None, f"dist_attr is {op_dist_attr}" embedding_op_dist_attr.set_output_dist_attr( output_varname, output_dist_attr ) @@ -528,9 +524,7 @@ def forward(ctx, *args, **kwargs): ) for output_varname in c_allreduce_sum_op.desc.output_arg_names(): output_dist_attr = op_dist_attr.get_output_dist_attr(output_varname) - assert output_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr - ) + assert output_dist_attr is not None, f"dist_attr is {op_dist_attr}" allreduce_op_dist_attr.set_output_dist_attr( output_varname, output_dist_attr ) @@ -583,9 +577,7 @@ def backward(ctx, *args, **kwargs): dist_attr = ctx.get_op_dist_attr_for_program(backward_op) assert ( dist_attr is not None - ), "backward op [{}] don't have dist attribute !".format( - str(backward_op) - ) + ), f"backward op [{str(backward_op)}] don't have dist attribute !" # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism if rank_id not in dist_attr.process_mesh.process_ids: diff --git a/python/paddle/distributed/auto_parallel/static/operators/dist_matmul.py b/python/paddle/distributed/auto_parallel/static/operators/dist_matmul.py index 57265235a5e9d..1386c5e661cc8 100644 --- a/python/paddle/distributed/auto_parallel/static/operators/dist_matmul.py +++ b/python/paddle/distributed/auto_parallel/static/operators/dist_matmul.py @@ -353,9 +353,7 @@ def _right_operand_parameter_matmul_backward(ctx, *args, **kwargs): assert not is_parameter_related( X_var.name, main_block - ), "left operand(X) [{}] of dist matmul should not be parameter".format( - X_var.name - ) + ), f"left operand(X) [{X_var.name}] of dist matmul should not be parameter" X_var_dims_mapping = dist_attr.get_input_dims_mapping(X_var.name) Y_var_dim_mapping = dist_attr.get_input_dims_mapping(Y_var.name) @@ -722,21 +720,15 @@ def forward(ctx, *args, **kwargs): # check validation of inputs / outputs for input_name in src_op.desc.input_names(): - assert input_name in kwargs, "input [{}] is not given".format( - input_name - ) + assert input_name in kwargs, f"input [{input_name}] is not given" assert len(kwargs[input_name]) == len( src_op.desc.input(input_name) ), f"number of tensor for input [{input_name}] is not match" for output_name in src_op.desc.output_names(): - assert output_name in kwargs, "input [{}] is not given".format( - output_name - ) + assert output_name in kwargs, f"input [{output_name}] is not given" assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) - ), "number of tensor for input [{}] is not match".format( - output_name - ) + ), f"number of tensor for input [{output_name}] is not match" X_var = main_block._var_recursive(kwargs['X'][0]) Weight_var = main_block._var_recursive(kwargs['Y'][0]) @@ -814,9 +806,9 @@ def forward(ctx, *args, **kwargs): input_dist_attr = op_dist_attr.get_input_dist_attr( input_varname ) - assert input_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr - ) + assert ( + input_dist_attr is not None + ), f"dist_attr is {op_dist_attr}" matmul_op_dist_attr.set_input_dist_attr( input_varname, input_dist_attr ) @@ -831,9 +823,7 @@ def forward(ctx, *args, **kwargs): # output output_varname = matmul_op.desc.output_arg_names()[0] output_dist_attr = op_dist_attr.get_output_dist_attr(output_varname) - assert output_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr - ) + assert output_dist_attr is not None, f"dist_attr is {op_dist_attr}" matmul_op_dist_attr.set_output_dist_attr( output_varname, 
output_dist_attr ) @@ -1043,21 +1033,15 @@ def forward(ctx, *args, **kwargs): # check validation of inputs / outputs for input_name in src_op.desc.input_names(): - assert input_name in kwargs, "input [{}] is not given".format( - input_name - ) + assert input_name in kwargs, f"input [{input_name}] is not given" assert len(kwargs[input_name]) == len( src_op.desc.input(input_name) ), f"number of tensor for input [{input_name}] is not match" for output_name in src_op.desc.output_names(): - assert output_name in kwargs, "input [{}] is not given".format( - output_name - ) + assert output_name in kwargs, f"input [{output_name}] is not given" assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) - ), "number of tensor for input [{}] is not match".format( - output_name - ) + ), f"number of tensor for input [{output_name}] is not match" X_var = main_block._var_recursive(kwargs['X'][0]) Weight_var = main_block._var_recursive(kwargs['Y'][0]) @@ -1161,17 +1145,13 @@ def forward(ctx, *args, **kwargs): matmul_op_dist_attr.impl_idx = op_dist_attr.impl_idx for input_varname in matmul_op.desc.input_arg_names(): input_dist_attr = op_dist_attr.get_input_dist_attr(input_varname) - assert input_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr - ) + assert input_dist_attr is not None, f"dist_attr is {op_dist_attr}" matmul_op_dist_attr.set_input_dist_attr( input_varname, input_dist_attr ) output_varname = matmul_op.desc.output_arg_names()[0] output_dist_attr = op_dist_attr.get_output_dist_attr(Out_var.name) - assert output_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr - ) + assert output_dist_attr is not None, f"dist_attr is {op_dist_attr}" matmul_op_dist_attr.set_output_dist_attr( output_varname, output_dist_attr ) @@ -1191,9 +1171,7 @@ def forward(ctx, *args, **kwargs): ) for output_varname in c_allreduce_sum_op.desc.output_arg_names(): output_dist_attr = op_dist_attr.get_output_dist_attr(output_varname) - assert output_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr - ) + assert output_dist_attr is not None, f"dist_attr is {op_dist_attr}" allreduce_op_dist_attr.set_output_dist_attr( output_varname, output_dist_attr ) @@ -1560,21 +1538,15 @@ def forward(ctx, *args, **kwargs): # check validation of inputs / outputs for input_name in src_op.desc.input_names(): - assert input_name in kwargs, "input [{}] is not given".format( - input_name - ) + assert input_name in kwargs, f"input [{input_name}] is not given" assert len(kwargs[input_name]) == len( src_op.desc.input(input_name) ), f"number of tensor for input [{input_name}] is not match" for output_name in src_op.desc.output_names(): - assert output_name in kwargs, "input [{}] is not given".format( - output_name - ) + assert output_name in kwargs, f"input [{output_name}] is not given" assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) - ), "number of tensor for input [{}] is not match".format( - output_name - ) + ), f"number of tensor for input [{output_name}] is not match" X_var = main_block._var_recursive(kwargs['X'][0]) Weight_var = main_block._var_recursive(kwargs['Y'][0]) @@ -1653,9 +1625,9 @@ def forward(ctx, *args, **kwargs): input_dist_attr = op_dist_attr.get_input_dist_attr( input_varname ) - assert input_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr - ) + assert ( + input_dist_attr is not None + ), f"dist_attr is {op_dist_attr}" matmulv2_op_dist_attr.set_input_dist_attr( input_varname, input_dist_attr ) @@ -1669,9 +1641,7 @@ def forward(ctx, *args, 
**kwargs): ) for output_varname in matmul_v2_op.desc.output_arg_names(): output_dist_attr = op_dist_attr.get_output_dist_attr(output_varname) - assert output_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr - ) + assert output_dist_attr is not None, f"dist_attr is {op_dist_attr}" matmulv2_op_dist_attr.set_output_dist_attr( output_varname, output_dist_attr ) @@ -1881,21 +1851,15 @@ def forward(ctx, *args, **kwargs): # check validation of inputs / outputs for input_name in src_op.desc.input_names(): - assert input_name in kwargs, "input [{}] is not given".format( - input_name - ) + assert input_name in kwargs, f"input [{input_name}] is not given" assert len(kwargs[input_name]) == len( src_op.desc.input(input_name) ), f"number of tensor for input [{input_name}] is not match" for output_name in src_op.desc.output_names(): - assert output_name in kwargs, "input [{}] is not given".format( - output_name - ) + assert output_name in kwargs, f"input [{output_name}] is not given" assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) - ), "number of tensor for input [{}] is not match".format( - output_name - ) + ), f"number of tensor for input [{output_name}] is not match" X_var = main_block._var_recursive(kwargs['X'][0]) Weight_var = main_block._var_recursive(kwargs['Y'][0]) @@ -1998,17 +1962,13 @@ def forward(ctx, *args, **kwargs): matmulv2_op_dist_attr.impl_idx = op_dist_attr.impl_idx for input_varname in matmul_v2_op.desc.input_arg_names(): input_dist_attr = op_dist_attr.get_input_dist_attr(input_varname) - assert input_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr - ) + assert input_dist_attr is not None, f"dist_attr is {op_dist_attr}" matmulv2_op_dist_attr.set_input_dist_attr( input_varname, input_dist_attr ) output_varname = matmul_v2_op.desc.output_arg_names()[0] output_dist_attr = op_dist_attr.get_output_dist_attr(Out_var.name) - assert output_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr - ) + assert output_dist_attr is not None, f"dist_attr is {op_dist_attr}" matmulv2_op_dist_attr.set_output_dist_attr( output_varname, output_dist_attr ) @@ -2028,9 +1988,7 @@ def forward(ctx, *args, **kwargs): ) for output_varname in c_allreduce_sum_op.desc.output_arg_names(): output_dist_attr = op_dist_attr.get_output_dist_attr(output_varname) - assert output_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr - ) + assert output_dist_attr is not None, f"dist_attr is {op_dist_attr}" allreduce_op_dist_attr.set_output_dist_attr( output_varname, output_dist_attr ) @@ -2389,21 +2347,15 @@ def forward(ctx, *args, **kwargs): # check validation of inputs / outputs for input_name in src_op.desc.input_names(): - assert input_name in kwargs, "input [{}] is not given".format( - input_name - ) + assert input_name in kwargs, f"input [{input_name}] is not given" assert len(kwargs[input_name]) == len( src_op.desc.input(input_name) ), f"number of tensor for input [{input_name}] is not match" for output_name in src_op.desc.output_names(): - assert output_name in kwargs, "input [{}] is not given".format( - output_name - ) + assert output_name in kwargs, f"input [{output_name}] is not given" assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) - ), "number of tensor for input [{}] is not match".format( - output_name - ) + ), f"number of tensor for input [{output_name}] is not match" X_var = main_block._var_recursive(kwargs['X'][0]) Weight_var = main_block._var_recursive(kwargs['Y'][0]) @@ -2495,9 +2447,9 @@ def forward(ctx, 
*args, **kwargs): input_dist_attr = op_dist_attr.get_input_dist_attr( input_varname ) - assert input_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr - ) + assert ( + input_dist_attr is not None + ), f"dist_attr is {op_dist_attr}" matmulv2_op_dist_attr.set_input_dist_attr( input_varname, input_dist_attr ) @@ -2511,9 +2463,7 @@ def forward(ctx, *args, **kwargs): ) for output_varname in mul_op.desc.output_arg_names(): output_dist_attr = op_dist_attr.get_output_dist_attr(output_varname) - assert output_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr - ) + assert output_dist_attr is not None, f"dist_attr is {op_dist_attr}" matmulv2_op_dist_attr.set_output_dist_attr( output_varname, output_dist_attr ) @@ -2717,21 +2667,15 @@ def forward(ctx, *args, **kwargs): # check validation of inputs / outputs for input_name in src_op.desc.input_names(): - assert input_name in kwargs, "input [{}] is not given".format( - input_name - ) + assert input_name in kwargs, f"input [{input_name}] is not given" assert len(kwargs[input_name]) == len( src_op.desc.input(input_name) ), f"number of tensor for input [{input_name}] is not match" for output_name in src_op.desc.output_names(): - assert output_name in kwargs, "input [{}] is not given".format( - output_name - ) + assert output_name in kwargs, f"input [{output_name}] is not given" assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) - ), "number of tensor for input [{}] is not match".format( - output_name - ) + ), f"number of tensor for input [{output_name}] is not match" X_var = main_block._var_recursive(kwargs['X'][0]) Weight_var = main_block._var_recursive(kwargs['Y'][0]) @@ -2849,17 +2793,13 @@ def forward(ctx, *args, **kwargs): matmulv2_op_dist_attr.impl_idx = op_dist_attr.impl_idx for input_varname in mul_op.desc.input_arg_names(): input_dist_attr = op_dist_attr.get_input_dist_attr(input_varname) - assert input_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr - ) + assert input_dist_attr is not None, f"dist_attr is {op_dist_attr}" matmulv2_op_dist_attr.set_input_dist_attr( input_varname, input_dist_attr ) output_varname = mul_op.desc.output_arg_names()[0] output_dist_attr = op_dist_attr.get_output_dist_attr(Out_var.name) - assert output_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr - ) + assert output_dist_attr is not None, f"dist_attr is {op_dist_attr}" matmulv2_op_dist_attr.set_output_dist_attr( output_varname, output_dist_attr ) @@ -2879,9 +2819,7 @@ def forward(ctx, *args, **kwargs): ) for output_varname in c_allreduce_sum_op.desc.output_arg_names(): output_dist_attr = op_dist_attr.get_output_dist_attr(output_varname) - assert output_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr - ) + assert output_dist_attr is not None, f"dist_attr is {op_dist_attr}" allreduce_op_dist_attr.set_output_dist_attr( output_varname, output_dist_attr ) diff --git a/python/paddle/distributed/auto_parallel/static/operators/dist_pnorm.py b/python/paddle/distributed/auto_parallel/static/operators/dist_pnorm.py index 3007285addc70..9f322cb5caf8a 100644 --- a/python/paddle/distributed/auto_parallel/static/operators/dist_pnorm.py +++ b/python/paddle/distributed/auto_parallel/static/operators/dist_pnorm.py @@ -166,21 +166,15 @@ def forward(ctx, *args, **kwargs): # check validation of inputs / outputs for input_name in src_op.desc.input_names(): - assert input_name in kwargs, "input [{}] is not given".format( - input_name - ) + assert input_name in kwargs, f"input [{input_name}] is 
not given" assert len(kwargs[input_name]) == len( src_op.desc.input(input_name) ), f"number of tensor for input [{input_name}] is not match" for output_name in src_op.desc.output_names(): - assert output_name in kwargs, "input [{}] is not given".format( - output_name - ) + assert output_name in kwargs, f"input [{output_name}] is not given" assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) - ), "number of tensor for input [{}] is not match".format( - output_name - ) + ), f"number of tensor for input [{output_name}] is not match" if rank_id not in op_dist_attr.process_mesh.process_ids: rank_id = _get_corresponding_rank( @@ -279,21 +273,15 @@ def backward(ctx, *args, **kwargs): # check validation of inputs / outputs for input_name in backward_op.desc.input_names(): - assert input_name in kwargs, "input [{}] is not given".format( - input_name - ) + assert input_name in kwargs, f"input [{input_name}] is not given" assert len(kwargs[input_name]) == len( backward_op.desc.input(input_name) ), f"number of tensor for input [{input_name}] is not match" for output_name in backward_op.desc.output_names(): - assert output_name in kwargs, "input [{}] is not given".format( - output_name - ) + assert output_name in kwargs, f"input [{output_name}] is not given" assert len(kwargs[output_name]) == len( backward_op.desc.output(output_name) - ), "number of tensor for input [{}] is not match".format( - output_name - ) + ), f"number of tensor for input [{output_name}] is not match" X_var = main_block._var_recursive(kwargs['X'][0]) X_grad_var = main_block._var_recursive(kwargs['X@GRAD'][0]) diff --git a/python/paddle/distributed/auto_parallel/static/operators/dist_reduce_sum_p.py b/python/paddle/distributed/auto_parallel/static/operators/dist_reduce_sum_p.py index 6cb9721274213..ba74be866c1ee 100644 --- a/python/paddle/distributed/auto_parallel/static/operators/dist_reduce_sum_p.py +++ b/python/paddle/distributed/auto_parallel/static/operators/dist_reduce_sum_p.py @@ -86,21 +86,15 @@ def forward(ctx, *args, **kwargs): # check validation of inputs / outputs for input_name in src_op.desc.input_names(): - assert input_name in kwargs, "input [{}] is not given".format( - input_name - ) + assert input_name in kwargs, f"input [{input_name}] is not given" assert len(kwargs[input_name]) == len( src_op.desc.input(input_name) ), f"number of tensor for input [{input_name}] is not match" for output_name in src_op.desc.output_names(): - assert output_name in kwargs, "input [{}] is not given".format( - output_name - ) + assert output_name in kwargs, f"input [{output_name}] is not given" assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) - ), "number of tensor for input [{}] is not match".format( - output_name - ) + ), f"number of tensor for input [{output_name}] is not match" # replicate op in dist program dist_op = main_block.append_op(type='nop') diff --git a/python/paddle/distributed/auto_parallel/static/operators/dist_reshape.py b/python/paddle/distributed/auto_parallel/static/operators/dist_reshape.py index 267e8437abacc..e89caba2dd68d 100644 --- a/python/paddle/distributed/auto_parallel/static/operators/dist_reshape.py +++ b/python/paddle/distributed/auto_parallel/static/operators/dist_reshape.py @@ -246,21 +246,15 @@ def forward(ctx, *args, **kwargs): # check validation of inputs / outputs for input_name in src_op.desc.input_names(): - assert input_name in kwargs, "input [{}] is not given".format( - input_name - ) + assert input_name in kwargs, f"input [{input_name}] is not 
given" assert len(kwargs[input_name]) == len( src_op.desc.input(input_name) ), f"number of tensor for input [{input_name}] is not match" for output_name in src_op.desc.output_names(): - assert output_name in kwargs, "input [{}] is not given".format( - output_name - ) + assert output_name in kwargs, f"input [{output_name}] is not given" assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) - ), "number of tensor for input [{}] is not match".format( - output_name - ) + ), f"number of tensor for input [{output_name}] is not match" X_var = main_block._var_recursive(kwargs['X'][0]) Out_var = main_block._var_recursive(kwargs['Out'][0]) @@ -508,21 +502,15 @@ def forward(ctx, *args, **kwargs): # check validation of inputs / outputs for input_name in src_op.desc.input_names(): - assert input_name in kwargs, "input [{}] is not given".format( - input_name - ) + assert input_name in kwargs, f"input [{input_name}] is not given" assert len(kwargs[input_name]) == len( src_op.desc.input(input_name) ), f"number of tensor for input [{input_name}] is not match" for output_name in src_op.desc.output_names(): - assert output_name in kwargs, "input [{}] is not given".format( - output_name - ) + assert output_name in kwargs, f"input [{output_name}] is not given" assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) - ), "number of tensor for input [{}] is not match".format( - output_name - ) + ), f"number of tensor for input [{output_name}] is not match" X_var = main_block._var_recursive(kwargs['X'][0]) Out_var = main_block._var_recursive(kwargs['Out'][0]) @@ -763,21 +751,15 @@ def forward(ctx, *args, **kwargs): # check validation of inputs / outputs for input_name in src_op.desc.input_names(): - assert input_name in kwargs, "input [{}] is not given".format( - input_name - ) + assert input_name in kwargs, f"input [{input_name}] is not given" assert len(kwargs[input_name]) == len( src_op.desc.input(input_name) ), f"number of tensor for input [{input_name}] is not match" for output_name in src_op.desc.output_names(): - assert output_name in kwargs, "input [{}] is not given".format( - output_name - ) + assert output_name in kwargs, f"input [{output_name}] is not given" assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) - ), "number of tensor for input [{}] is not match".format( - output_name - ) + ), f"number of tensor for input [{output_name}] is not match" X_var = main_block._var_recursive(kwargs['X'][0]) Out_var = main_block._var_recursive(kwargs['Out'][0]) diff --git a/python/paddle/distributed/auto_parallel/static/operators/dist_update_loss_scaling.py b/python/paddle/distributed/auto_parallel/static/operators/dist_update_loss_scaling.py index 1c39dd6b2fd53..8ff358d14b1db 100644 --- a/python/paddle/distributed/auto_parallel/static/operators/dist_update_loss_scaling.py +++ b/python/paddle/distributed/auto_parallel/static/operators/dist_update_loss_scaling.py @@ -74,9 +74,7 @@ def backward(ctx, *args, **kwargs): dist_attr = ctx.get_op_dist_attr_for_program(backward_op) assert ( dist_attr is not None - ), "backward op [{}] don't have dist attribute !".format( - str(backward_op) - ) + ), f"backward op [{str(backward_op)}] don't have dist attribute !" 
assert rank_id in dist_attr.process_mesh.process_ids diff --git a/python/paddle/distributed/auto_parallel/static/partitioner.py b/python/paddle/distributed/auto_parallel/static/partitioner.py index 78094c73de9e0..b00baf32ec0fe 100644 --- a/python/paddle/distributed/auto_parallel/static/partitioner.py +++ b/python/paddle/distributed/auto_parallel/static/partitioner.py @@ -140,14 +140,10 @@ def partition_startup_program( output_vars = op.desc.output_arg_names() assert ( len(output_vars) == 1 - ), "initializer should output only ONE variable, but got [{}]".format( - str(op.desc) - ) + ), f"initializer should output only ONE variable, but got [{str(op.desc)}]" assert ( temp_varname_map[output_vars[0]] in var2shape - ), "try to initialize [{}] which is not a persistable var".format( - output_vars[0] - ) + ), f"try to initialize [{output_vars[0]}] which is not a persistable var" new_op_desc = target_block.desc.append_op() new_op_desc.copy_from(op.desc) new_op_desc._rename_output( @@ -393,9 +389,7 @@ def _get_dist_shape(var, dist_attr): assert len(var_shape) == len( mapping - ), "variable shape [{}] and dim_mapping [{}] is NOT match !".format( - var_shape, mapping - ) + ), f"variable shape [{var_shape}] and dim_mapping [{mapping}] is NOT match !" new_shape = [] for idx in range(len(var_shape)): if var_shape[idx] == -1 or mapping[idx] == -1: diff --git a/python/paddle/distributed/auto_parallel/static/reshard.py b/python/paddle/distributed/auto_parallel/static/reshard.py index 60dff3401dd84..facfe183c5d9a 100644 --- a/python/paddle/distributed/auto_parallel/static/reshard.py +++ b/python/paddle/distributed/auto_parallel/static/reshard.py @@ -1001,28 +1001,25 @@ def __init__( ): assert isinstance(auto_parallel_main_prog, Program), ( "The type of auto_parallel_main_prog should be Program, " - "but got {}.".format(type(auto_parallel_main_prog)) + f"but got {type(auto_parallel_main_prog)}." ) if auto_parallel_startup_prog is not None: assert isinstance(auto_parallel_main_prog, Program), ( "The type of auto_parallel_startup_prog should be Program or None, " - "but got {}.".format(type(auto_parallel_startup_prog)) + f"but got {type(auto_parallel_startup_prog)}." ) - assert isinstance( - rank_id, int - ), "The type of rank_id should be int, " "but got {}.".format( - type(rank_id) + assert isinstance(rank_id, int), ( + "The type of rank_id should be int, " f"but got {type(rank_id)}." ) assert isinstance(dist_context, DistributedContext), ( "The type of dist_context should be DistributedContext, " - "but got {}.".format(type(dist_context)) + f"but got {type(dist_context)}." ) if batch_size is not None: - assert isinstance( - batch_size, int - ), "The type of batch_size should be int, " "but got {}.".format( - type(batch_size) + assert isinstance(batch_size, int), ( + "The type of batch_size should be int, " + f"but got {type(batch_size)}." ) self._auto_parallel_main_prog = auto_parallel_main_prog @@ -1783,9 +1780,7 @@ def parse_op_desc( break assert ( idx is not None - ), "The op for reshard cannot be found in the rank {} program.".format( - self.rank_id - ) + ), f"The op for reshard cannot be found in the rank {self.rank_id} program." 
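Note: the reshard.py hunk above shows how UP032 handles messages assembled from adjacent string literals: only the literal that carried the .format() call becomes an f-string, the neighbouring plain literals are left untouched, and implicit concatenation still joins the pieces. A small self-contained sketch of that pattern (the rank_id value is invented for illustration):

rank_id = 3

# Before: two adjacent literals, only the second one formatted.
old = (
    "The type of rank_id should be int, "
    "but got {}.".format(type(rank_id))
)

# After UP032: the formatted literal becomes an f-string, the plain
# prefix stays a normal string, and implicit concatenation joins them.
new = (
    "The type of rank_id should be int, "
    f"but got {type(rank_id)}."
)

assert old == new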
matched_op = block.ops[idx] source_tensor = get_var_with_recursion( diff --git a/python/paddle/distributed/auto_parallel/static/tuner/algorithms.py b/python/paddle/distributed/auto_parallel/static/tuner/algorithms.py index 5eea035ea92fd..37cb3ed501181 100644 --- a/python/paddle/distributed/auto_parallel/static/tuner/algorithms.py +++ b/python/paddle/distributed/auto_parallel/static/tuner/algorithms.py @@ -199,9 +199,7 @@ def next_trial(self): new_strategy = copy.deepcopy(self._config.dist_strategy) recompute = new_strategy.recompute recompute.no_recompute_segments.extend(new_no_recompute) - name = "trial-recompute-part-segments-idx{}".format( - self._trial_idx - ) + name = f"trial-recompute-part-segments-idx{self._trial_idx}" return Trial(new_strategy, name, self.changed_configs) else: return Trial(None, None, None, status=TrialStatus.STOPPED) diff --git a/python/paddle/distributed/auto_parallel/static/tuner/optimization_tuner.py b/python/paddle/distributed/auto_parallel/static/tuner/optimization_tuner.py index 64eaca28c06ea..6a3365eff018b 100644 --- a/python/paddle/distributed/auto_parallel/static/tuner/optimization_tuner.py +++ b/python/paddle/distributed/auto_parallel/static/tuner/optimization_tuner.py @@ -538,9 +538,7 @@ def _evaluate_trial(self, trial): ) self._logger.info( - "Trial {} evaluation finish with {}.".format( - trial.name, parse_results(results) - ) + f"Trial {trial.name} evaluation finish with {parse_results(results)}." ) return results diff --git a/python/paddle/distributed/auto_parallel/static/tuner/recorder.py b/python/paddle/distributed/auto_parallel/static/tuner/recorder.py index 6faaac8977910..a1ed12187260a 100644 --- a/python/paddle/distributed/auto_parallel/static/tuner/recorder.py +++ b/python/paddle/distributed/auto_parallel/static/tuner/recorder.py @@ -70,9 +70,7 @@ class MetricRecords: def __init__(self, direction="min"): if direction not in {"min", "max"}: raise ValueError( - "direction should be one of {{min, max}}, but got: {}.".format( - direction - ) + f"direction should be one of {{min, max}}, but got: {direction}." ) self._direction = direction self._records = {} diff --git a/python/paddle/distributed/auto_parallel/static/tuner/rule_based_tuner.py b/python/paddle/distributed/auto_parallel/static/tuner/rule_based_tuner.py index ad21e4f00109a..07d98d67226d7 100644 --- a/python/paddle/distributed/auto_parallel/static/tuner/rule_based_tuner.py +++ b/python/paddle/distributed/auto_parallel/static/tuner/rule_based_tuner.py @@ -1577,9 +1577,7 @@ def _is_grad_var_name(name): output_name = grad_op_next_op.output_arg_names[0] assert ( output_name in grad_var_to_var - ), "sum op's output '{}' has no corresponding var".format( - output_name - ) + ), f"sum op's output '{output_name}' has no corresponding var" ref_fwd_var_name = grad_var_to_var[output_name] ref_fwd_var = vars[ref_fwd_var_name] ref_fwd_dist_attr = sub_program_dist_context.get_tensor_dist_attr_for_program( @@ -2098,9 +2096,7 @@ def prepare(self): self.layers = self.cluster_operators() end = time.time() self._logger.info( - "Cluster operators to {} layers in {:.2f}s.".format( - len(self.layers), end - begin - ) + f"Cluster operators to {len(self.layers)} layers in {end - begin:.2f}s." ) # step2: generate sub program of each layer @@ -2175,9 +2171,7 @@ def prepare(self): self.complete_sub_bwd_programs() end = time.time() self._logger.info( - "Complete all sub backward programs in {:.2f}s.".format( - end - begin - ) + f"Complete all sub backward programs in {end - begin:.2f}s." 
) # step8: complete update sub programs diff --git a/python/paddle/distributed/auto_parallel/static/tuner/tunable_variable.py b/python/paddle/distributed/auto_parallel/static/tuner/tunable_variable.py index 6f46ccb90132a..1aa46f4966157 100644 --- a/python/paddle/distributed/auto_parallel/static/tuner/tunable_variable.py +++ b/python/paddle/distributed/auto_parallel/static/tuner/tunable_variable.py @@ -49,9 +49,7 @@ def __init__(self, name, default): self.name = name if not isinstance(default, (str, int, float, bool)): raise ValueError( - "Fixed must be an str, int, float or bool, but found {}".format( - default - ) + f"Fixed must be an str, int, float or bool, but found {default}" ) self._default = default @@ -79,9 +77,7 @@ def random(self, seed=None): return rng.choice((True, False)) def __repr__(self): - return 'Boolean(name: "{}", default: {})'.format( - self.name, self.default - ) + return f'Boolean(name: "{self.name}", default: {self.default})' class Choice(TunableVariable): diff --git a/python/paddle/distributed/auto_parallel/static/utils.py b/python/paddle/distributed/auto_parallel/static/utils.py index c4b3e01839891..2e41c6de99802 100644 --- a/python/paddle/distributed/auto_parallel/static/utils.py +++ b/python/paddle/distributed/auto_parallel/static/utils.py @@ -285,9 +285,9 @@ def _get_comm_group(processes, shape, axis, rank): # NOTE _linear_idx2coordinate assume processes mesh start with 0 and continuous # tricks to support processes mesh when it is not start with 0 or continuous - assert rank in processes, "rank [{}] is NOT in processes group {}".format( - rank, processes - ) + assert ( + rank in processes + ), f"rank [{rank}] is NOT in processes group {processes}" rank_relatvie = processes.index(rank) coordinate = _linear_idx2coordinate(shape, rank_relatvie) coordinates_in_group = [coordinate[:] for i in range(shape[axis])] @@ -361,9 +361,7 @@ def _coordinate2linear_idx(mesh_shape, coordinate): for i in range(len(mesh_shape)): assert ( coordinate[i] >= 0 - ), "index in dimension [{}] is least than zero. coordinate: {}".format( - i, coordinate - ) + ), f"index in dimension [{i}] is least than zero. coordinate: {coordinate}" assert ( coordinate[i] < mesh_shape[i] ), "index beyond extent in dimension [{}]. shape: {}, coordinate: {}".format( @@ -400,9 +398,7 @@ def _linear_idx2coordinate(mesh_shape, linear_idx): """ - assert linear_idx >= 0, "linear index [{}] is least than zero".format( - linear_idx - ) + assert linear_idx >= 0, f"linear index [{linear_idx}] is least than zero" assert linear_idx < np.prod( mesh_shape ), "linear index beyond the extent of mesh shape. shape: {}, linear index: {}".format( @@ -450,9 +446,7 @@ def _get_unshard_dist_shape(var, dist_attr): mesh = dist_attr.process_mesh.shape assert len(var_shape) == len( mapping - ), "variable shape [{}] and dim_mapping [{}] is NOT match !".format( - var_shape, mapping - ) + ), f"variable shape [{var_shape}] and dim_mapping [{mapping}] is NOT match !" new_shape = [] for idx in range(len(var_shape)): if var_shape[idx] == -1 or mapping[idx] == -1: @@ -490,21 +484,19 @@ def _update_addition_info(addition_info): elif not isinstance(addition_info, dict): raise TypeError( "The type of 'addition_info' should be 'dict', " - "but got '{}'.".format(str(type(addition_info))) + f"but got '{str(type(addition_info))}'." 
) else: for item, value in addition_info.items(): if item not in ["epoch", "batch", "batch_size"]: raise ValueError( "The key of 'addition_info' should be one of the " - "['epoch', 'batch', 'batch_size'], but got '{}'.".format( - str(item) - ) + f"['epoch', 'batch', 'batch_size'], but got '{str(item)}'." ) if not isinstance(value, int): raise ValueError( "The value of 'addition_info' should be 'int', " - "but got '{}'.".format(str(type(value))) + f"but got '{str(type(value))}'." ) add_info[item] = value return add_info @@ -519,7 +511,7 @@ def _check_valid_path(file_path): if not isinstance(file, str): raise TypeError( "The type of file path should be 'str', " - "but got '{}'.".format(str(type(file))) + f"but got '{str(type(file))}'." ) if not os.path.exists(file): raise ValueError(f"The file path '{file}' does not exist.") @@ -527,7 +519,7 @@ def _check_valid_path(file_path): else: raise TypeError( "The type of file path should be 'list', " - "but got '{}'.".format(str(type(file_path))) + f"but got '{str(type(file_path))}'." ) @@ -537,19 +529,19 @@ def _check_param_dict(param_dict): elif not isinstance(param_dict, dict): raise TypeError( "The type of 'param_dict' should be 'dict', " - "but got '{}'.".format(str(type(param_dict))) + f"but got '{str(type(param_dict))}'." ) else: for name, value in param_dict.items(): if not isinstance(name, str): raise TypeError( "The type of key of 'param_dict' should be 'str', " - "but got '{}'.".format(str(type(name))) + f"but got '{str(type(name))}'." ) if not isinstance(value, paddle.base.LoDTensor): raise TypeError( "The type of value of 'param_dict' should be 'LoDTensor', " - "but got '{}'.".format(str(type(value))) + f"but got '{str(type(value))}'." ) return param_dict @@ -560,26 +552,26 @@ def _check_dist_attr(dist_attr): elif not isinstance(dist_attr, dict): raise TypeError( "The type of 'dist_attr' should be 'dict', " - "but got '{}'.".format(str(type(dist_attr))) + f"but got '{str(type(dist_attr))}'." ) else: for name, value in dist_attr.items(): if not isinstance(name, str): raise TypeError( "The type of param name of 'dist_attr' should be 'str', " - "but got '{}'.".format(str(type(name))) + f"but got '{str(type(name))}'." ) if not isinstance(value, dict): raise TypeError( "The type of distributed attribute should be 'dict', " - "but got '{}'".format(str(type(value))) + f"but got '{str(type(value))}'" ) attr = ['process_shape', 'process_group', 'dims_mapping'] if list(value.keys()) != attr: raise ValueError( "The key of distributed attribute should be " "'['process_shape', 'process_group', 'dims_mapping']', " - "but got {}.".format(str(value.keys())) + f"but got {str(value.keys())}." ) return dist_attr @@ -878,9 +870,7 @@ def merge_and_slice_parameter(dist_param_dict, pre_dist_attr, cur_dist_attr): if not isinstance(name, str): raise TypeError( "The key of 'dist_param_dict' is parameter's name, " - "and its type should be 'str', but got {}.".format( - str(type(name)) - ) + f"and its type should be 'str', but got {str(type(name))}." ) if not isinstance(value, list) or not all( isinstance(v, np.ndarray) for v in value @@ -1897,7 +1887,7 @@ def get_lr(optimizer): else: raise TypeError( "'optimizer' must be object of class `paddle.optimizer.Optimizer`" - " or `paddle.static.Optimizer`, but got {}.".format(type(optimizer)) + f" or `paddle.static.Optimizer`, but got {type(optimizer)}." 
) @@ -2045,9 +2035,7 @@ def set_recompute_segments(model, losses, strategy, program): segments.append([min_idx, max_idx + 1]) else: logging.debug( - "Could not recompute op range [{}] - [{}] ".format( - min_idx, max_idx + 1 - ) + f"Could not recompute op range [{min_idx}] - [{max_idx + 1}] " ) start_idx += 1 @@ -2255,14 +2243,10 @@ def insert_dependencies_for_two_ops( assert ( len(prior_op.output_arg_names) >= 1 - ), "first op of dependency should at least have one output. [{}]".format( - str(prior_op) - ) + ), f"first op of dependency should at least have one output. [{str(prior_op)}]" assert ( len(posterior_op.input_arg_names) >= 1 - ), "second op of dependency should at least have one input. [{}]".format( - str(posterior_op) - ) + ), f"second op of dependency should at least have one input. [{str(posterior_op)}]" prior_op_mesh = dist_context.get_op_dist_attr_for_program( prior_op ).process_mesh diff --git a/python/paddle/distributed/auto_parallel/strategy.py b/python/paddle/distributed/auto_parallel/strategy.py index 36607f0903bc7..1df4663b4fed5 100644 --- a/python/paddle/distributed/auto_parallel/strategy.py +++ b/python/paddle/distributed/auto_parallel/strategy.py @@ -26,9 +26,7 @@ def __init__(self, category, config_dict=None): self._config_dict = config_dict else: raise ValueError( - "Expected a dictionary. But received: {}".format( - config_dict - ) + f"Expected a dictionary. But received: {config_dict}" ) # Initialize attributes by the default config config = constants.get_category_default_config(self._category) diff --git a/python/paddle/distributed/cloud_utils.py b/python/paddle/distributed/cloud_utils.py index 3fd8ce5d16a3a..ff9908c09c96a 100644 --- a/python/paddle/distributed/cloud_utils.py +++ b/python/paddle/distributed/cloud_utils.py @@ -47,21 +47,17 @@ def get_cloud_cluster(args_node_ips, args_node_ip, args_port, selected_devices): if node_ip != "127.0.0.1" and node_ip != args_node_ip: logger.warning( - "Please NOTE: When using paddlecloud, node_ip is \ -automatically got from POD_IP. Your input node_ip: {} doesn't equals to \ -node_ip: {} from paddlecloud environment.".format( - args_node_ip, node_ip - ) + f"Please NOTE: When using paddlecloud, node_ip is \ +automatically got from POD_IP. Your input node_ip: {args_node_ip} doesn't equals to \ +node_ip: {node_ip} from paddlecloud environment." ) if args_node_ips != "127.0.0.1" and args_node_ips != ",".join(node_ips): logger.warning( - "Please NOTE: When using paddlecloud, cluster_node_ips is \ + f"Please NOTE: When using paddlecloud, cluster_node_ips is \ automatically got from PADDLE_TRAINERS(multi nodes) or POD_IP(single node).\ -Your input cluster_node_ips: {} doesn't equals to IPs: {} from \ -paddlecloud environment.".format( - args_node_ips, node_ips - ) +Your input cluster_node_ips: {args_node_ips} doesn't equals to IPs: {node_ips} from \ +paddlecloud environment." 
) # DISTRIBUTED_TRAINER_ENDPOINTS: new environment since paddlecloud 1.8.4 @@ -101,10 +97,8 @@ def get_cloud_cluster(args_node_ips, args_node_ip, args_port, selected_devices): ) logger.debug( - "parsed from args: node_ips:{} \ - node_ip:{} node_rank:{} trainer_endpoints:{}".format( - node_ips, node_ip, node_rank, trainer_endpoints - ) + f"parsed from args: node_ips:{node_ips} \ + node_ip:{node_ip} node_rank:{node_rank} trainer_endpoints:{trainer_endpoints}" ) cluster, pod = get_cluster( diff --git a/python/paddle/distributed/communication/group.py b/python/paddle/distributed/communication/group.py index dfab85404a1d0..d73e3ce90cbd2 100644 --- a/python/paddle/distributed/communication/group.py +++ b/python/paddle/distributed/communication/group.py @@ -78,8 +78,8 @@ def get_group_rank(self, rank): return -1 def __repr__(self): - debug_str = "rank: {}, nranks: {}, id: {}, ranks: ".format( - self.rank, self.nranks, self.id + debug_str = ( + f"rank: {self.rank}, nranks: {self.nranks}, id: {self.id}, ranks: " ) debug_str += ", ".join(map(str, self.ranks)) debug_str += "; name: " @@ -112,9 +112,7 @@ def _warn_cur_rank_not_in_group(group): global_rank = dist.get_rank() if group and not group.is_member(): warnings.warn( - "Current global rank {} is not in group {}".format( - global_rank, group.name - ) + f"Current global rank {global_rank} is not in group {group.name}" ) return True return False @@ -124,9 +122,7 @@ def _get_or_throw_group_rank(global_rank, group): group_rank = group.get_group_rank(global_rank) assert ( group_rank >= 0 - ), "The input rank {} can not be found inside the group {}".format( - global_rank, group.name - ) + ), f"The input rank {global_rank} can not be found inside the group {group.name}" return group_rank diff --git a/python/paddle/distributed/communication/stream/gather.py b/python/paddle/distributed/communication/stream/gather.py index 4cb8d65c9d56f..a729d0c644537 100644 --- a/python/paddle/distributed/communication/stream/gather.py +++ b/python/paddle/distributed/communication/stream/gather.py @@ -36,9 +36,7 @@ def _gather_in_dygraph( assert ( len(gather_list) == nranks - ), " gather_list length {} and nrankd {} not equal".format( - len(gather_list), nranks - ) + ), f" gather_list length {len(gather_list)} and nrankd {nranks} not equal" task = group.process_group.gather( tensor, gather_list, dst_rank_in_group, sync_op, use_calc_stream diff --git a/python/paddle/distributed/fleet/base/orthogonal_strategy.py b/python/paddle/distributed/fleet/base/orthogonal_strategy.py index c64260bf25140..aea73054722b2 100644 --- a/python/paddle/distributed/fleet/base/orthogonal_strategy.py +++ b/python/paddle/distributed/fleet/base/orthogonal_strategy.py @@ -130,9 +130,7 @@ def rank_in_strategy(self, name): def _check_valid_strategy(self): assert len(self._list_of_strategy_name) == len( set(self._list_of_strategy_name) - ), "Defined duplicated strategies: {}".format( - self._list_of_strategy_name - ) + ), f"Defined duplicated strategies: {self._list_of_strategy_name}" num_of_ranks = functools.reduce( lambda x, y: x * y, self._list_of_degree ) @@ -145,9 +143,7 @@ def _check_valid_strategy(self): for strategy in fused_strategy: assert ( strategy in self._list_of_strategy_name - ), "Can not fuse strategy {} without defined previous.".format( - strategy - ) + ), f"Can not fuse strategy {strategy} without defined previous." 
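Note: the cloud_utils.py warnings above are the trickiest case in this patch: the literal spans several physical lines via backslash continuations inside the string. UP032 converts the whole continued literal into a single f-string and inlines the arguments, leaving the continuations (and any leading spaces they preserve) exactly as they were. A simplified, self-contained sketch with invented values and slightly shortened wording, not the exact message from the patch:

args_node_ip = "127.0.0.1"
node_ip = "10.0.0.2"

# Before: a backslash-continued literal formatted at the end.
old = "Your input node_ip: {} doesn't equal \
node_ip: {} from the paddlecloud environment.".format(args_node_ip, node_ip)

# After UP032: one continued f-string with the placeholders inlined;
# the continuation still joins the two physical lines into one message.
new = f"Your input node_ip: {args_node_ip} doesn't equal \
node_ip: {node_ip} from the paddlecloud environment."

assert old == new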
def _create_fused_group(self): for name in self._fused_strategy_dict: diff --git a/python/paddle/distributed/fleet/base/role_maker.py b/python/paddle/distributed/fleet/base/role_maker.py index 130a7a338a898..7b9cf269dcd26 100755 --- a/python/paddle/distributed/fleet/base/role_maker.py +++ b/python/paddle/distributed/fleet/base/role_maker.py @@ -58,8 +58,8 @@ def __init__(self): "gloo is not initialized, will not communicator with other nodes" ) self._err_type = "gloo initialized error, please check arguments" - self._err_world = "argument error, comm_world must in {}".format( - self._comm_world + self._err_world = ( + f"argument error, comm_world must in {self._comm_world}" ) self._is_initialized = False @@ -1173,9 +1173,7 @@ def _gloo_init(self): else: type = "FILE" print( - "Gloo init with {}: need_init_all: {}, args: {}".format( - type, need_init_all, kwargs - ) + f"Gloo init with {type}: need_init_all: {need_init_all}, args: {kwargs}" ) self._gloo.init( diff --git a/python/paddle/distributed/fleet/base/strategy_group.py b/python/paddle/distributed/fleet/base/strategy_group.py index 634131cf33087..6ae9c13c5e548 100644 --- a/python/paddle/distributed/fleet/base/strategy_group.py +++ b/python/paddle/distributed/fleet/base/strategy_group.py @@ -239,6 +239,4 @@ def _create_p2p_group(self): and self._send_prev_group and self._recv_next_group and self._recv_prev_group - ), "Error occurs while creating p2p group for rank {}.".format( - self._rank - ) + ), f"Error occurs while creating p2p group for rank {self._rank}." diff --git a/python/paddle/distributed/fleet/base/util_factory.py b/python/paddle/distributed/fleet/base/util_factory.py index 97543d9fc0476..309db09181b25 100755 --- a/python/paddle/distributed/fleet/base/util_factory.py +++ b/python/paddle/distributed/fleet/base/util_factory.py @@ -470,9 +470,7 @@ def reader(batch_size, fn, dim): v for v in prog.list_vars() if paddle.static.io.is_persistable(v) ] print( - "persistable vars in dump program: {}".format( - [v.name for v in saved_params] - ) + f"persistable vars in dump program: {[v.name for v in saved_params]}" ) def check_not_expected_ops(prog, not_expected_op_types): @@ -665,9 +663,7 @@ def check_not_expected_ops(prog, not_expected_op_types): ) else: print( - "load feed vars from files: {}.".format( - feed_config.feeded_vars_filelist - ) + f"load feed vars from files: {feed_config.feeded_vars_filelist}." ) feed_vars = [ inference_program.global_block().var( diff --git a/python/paddle/distributed/fleet/cloud_utils.py b/python/paddle/distributed/fleet/cloud_utils.py index 75df0fae32d1b..fcb2cf745bfd3 100644 --- a/python/paddle/distributed/fleet/cloud_utils.py +++ b/python/paddle/distributed/fleet/cloud_utils.py @@ -44,12 +44,10 @@ def get_cloud_cluster( if args_node_ips != "127.0.0.1" and args_node_ips != ",".join(node_ips): logger.warning( - "Please NOTE: When using paddlecloud, cluster_node_ips is \ + f"Please NOTE: When using paddlecloud, cluster_node_ips is \ automatically got from PADDLE_TRAINERS(multi nodes) or POD_IP(single node).\ -Your input cluster_node_ips: {} doesn't equals to IPs: {} from \ -paddlecloud environment.".format( - args_node_ips, node_ips - ) +Your input cluster_node_ips: {args_node_ips} doesn't equals to IPs: {node_ips} from \ +paddlecloud environment." 
) # DISTRIBUTED_TRAINER_ENDPOINTS: new environment since paddlecloud 1.8.4 @@ -89,10 +87,8 @@ def get_cloud_cluster( ) logger.debug( - "parsed from args: node_ips:{} \ - node_ip:{} node_rank:{} trainer_endpoints:{}".format( - node_ips, node_ip, node_rank, trainer_endpoints - ) + f"parsed from args: node_ips:{node_ips} \ + node_ip:{node_ip} node_rank:{node_rank} trainer_endpoints:{trainer_endpoints}" ) cluster, pod = get_cluster( diff --git a/python/paddle/distributed/fleet/elastic/manager.py b/python/paddle/distributed/fleet/elastic/manager.py index 00151a8dee5f1..6c3810f7aae74 100644 --- a/python/paddle/distributed/fleet/elastic/manager.py +++ b/python/paddle/distributed/fleet/elastic/manager.py @@ -229,9 +229,7 @@ def __init__(self, args, etcd_client): node_tag = ''.join( random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(6) ) - self.host_path = '{}/{}{}'.format( - self.node_prefix, node_tag, time.time() - ) + self.host_path = f'{self.node_prefix}/{node_tag}{time.time()}' ''' 0 group mode, be aware of healthy status of other workers 1 decouple mode, check own status only @@ -280,9 +278,7 @@ def lease_heartbeat(): ) except Exception as e: logger.error( - "[lease_heartbeat] internal error:{} {}".format( - e, traceback.format_exc() - ) + f"[lease_heartbeat] internal error:{e} {traceback.format_exc()}" ) break time.sleep(elastic_ttl / 3) @@ -309,9 +305,7 @@ def endpoints_call_back(event): edps = value.decode() if value is not None else '' self.dist_endpoints, self.trainers = edps.split('|') logger.info( - "set DISTRIBUTED_TRAINER_ENDPOINTS {} ".format( - self.dist_endpoints - ) + f"set DISTRIBUTED_TRAINER_ENDPOINTS {self.dist_endpoints} " ) logger.info(f"set PADDLE_TRAINERS {self.trainers} ") @@ -472,9 +466,7 @@ def _update_fault_tolrance(self): os.environ['DISTRIBUTED_TRAINER_ENDPOINTS'] = self.dist_endpoints os.environ['PADDLE_TRAINERS'] = self.trainers logger.info( - "update env DISTRIBUTED_TRAINER_ENDPOINTS {} ".format( - self.dist_endpoints - ) + f"update env DISTRIBUTED_TRAINER_ENDPOINTS {self.dist_endpoints} " ) logger.info(f"update env PADDLE_TRAINERS {self.trainers} ") return @@ -502,9 +494,9 @@ def _update_elastic_scale_out(self): if curr_host_port not in host_endpoints: host_endpoints.append(curr_host_port) - os.environ['PADDLE_TRAINER_ID'] = '{}'.format( - host_endpoints.index(self.curr_host) - ) + os.environ[ + 'PADDLE_TRAINER_ID' + ] = f'{host_endpoints.index(self.curr_host)}' hosts = ','.join( [host_port.split(":")[0] for host_port in host_endpoints] ) @@ -555,9 +547,9 @@ def _update_elastic_scale_in(self): ) self.args.ips = hosts - os.environ['PADDLE_TRAINER_ID'] = '{}'.format( - sorted_endpoints.index(self.curr_host) - ) + os.environ[ + 'PADDLE_TRAINER_ID' + ] = f'{sorted_endpoints.index(self.curr_host)}' os.environ['PADDLE_TRAINERS'] = hosts self.np = len(sorted_endpoints) os.environ['PADDLE_TRAINER_ENDPOINTS'] = ','.join(sorted_endpoints) diff --git a/python/paddle/distributed/fleet/launch.py b/python/paddle/distributed/fleet/launch.py index 4a334281e90d1..5a6b5665647de 100755 --- a/python/paddle/distributed/fleet/launch.py +++ b/python/paddle/distributed/fleet/launch.py @@ -274,9 +274,7 @@ def get_cluster_from_args(args, device_mode, devices_per_proc): node_rank = node_ips.index(node_ip) logger.debug( - "parsed from args: node_ips:{} node_ip:{} node_rank:{}".format( - node_ips, node_ip, node_rank - ) + f"parsed from args: node_ips:{node_ips} node_ip:{node_ip} node_rank:{node_rank}" ) free_ports = None diff --git a/python/paddle/distributed/fleet/launch_utils.py 
b/python/paddle/distributed/fleet/launch_utils.py index 2b48f29c57018..ac51a9b8a08bb 100755 --- a/python/paddle/distributed/fleet/launch_utils.py +++ b/python/paddle/distributed/fleet/launch_utils.py @@ -583,8 +583,8 @@ def start_local_trainers( ) logger.info( "details about PADDLE_TRAINER_ENDPOINTS can be found in " - "{}/endpoints.log, and detail running logs maybe found in " - "{}/workerlog.0".format(log_dir, log_dir) + f"{log_dir}/endpoints.log, and detail running logs maybe found in " + f"{log_dir}/workerlog.0" ) fn = None pre_fn = None if os.name == 'nt' else os.setsid @@ -699,20 +699,16 @@ def get_gpus(gpus): for x in gpus.split(','): assert x in cuda_visible_devices_list, ( "Can't find " - "your gpus {} in CUDA_VISIBLE_DEVICES[{}].".format( - x, cuda_visible_devices - ) + f"your gpus {x} in CUDA_VISIBLE_DEVICES[{cuda_visible_devices}]." ) res_gpus = [ cuda_visible_devices_list.index(x.strip()) for x in gpus.split(',') ] logger.info( - "Change selected_gpus into reletive values. --ips:{} " - "will change into relative_ips:{} according to your " - "CUDA_VISIBLE_DEVICES:{}".format( - gpus, res_gpus, cuda_visible_devices_list - ) + f"Change selected_gpus into reletive values. --ips:{gpus} " + f"will change into relative_ips:{res_gpus} according to your " + f"CUDA_VISIBLE_DEVICES:{cuda_visible_devices_list}" ) return res_gpus @@ -734,21 +730,16 @@ def get_xpus(xpus): for x in xpus.split(','): assert x in xpu_visible_devices_list, ( "Can't find " - "your xpus {} in XPU_VISIBLE_DEVICES[{}].".format( - x, - xpu_visible_devices, - ) + f"your xpus {x} in XPU_VISIBLE_DEVICES[{xpu_visible_devices}]." ) res_xpus = [ xpu_visible_devices_list.index(x.strip()) for x in xpus.split(',') ] logger.info( - "Change selected_xpus into reletive values. --ips:{} " - "will change into relative_ips:{} according to your " - "XPU_VISIBLE_DEVICES:{}".format( - xpus, res_xpus, xpu_visible_devices_list - ) + f"Change selected_xpus into reletive values. --ips:{xpus} " + f"will change into relative_ips:{res_xpus} according to your " + f"XPU_VISIBLE_DEVICES:{xpu_visible_devices_list}" ) return res_xpus @@ -826,9 +817,7 @@ def get_device_proc_info(args): devices_per_proc = list(range(0, args.nproc_per_node)) else: raise AssertionError( - "Can't support device_mode:{}, support only cpu|gpu|xpu now.".format( - device_mode - ) + f"Can't support device_mode:{device_mode}, support only cpu|gpu|xpu now." ) return (device_mode, devices_per_proc) @@ -965,10 +954,8 @@ def get_mapped_cluster_from_args_without_rank_mapping(args, device_mode): ), "ranks length should be equal to ips length." logger.debug( - "parsed from args: node_ips:{} node_ip:{} " - "node_rank:{} node_ranks:{}".format( - node_ips, node_ip, node_rank, node_ranks[node_rank] - ) + f"parsed from args: node_ips:{node_ips} node_ip:{node_ip} " + f"node_rank:{node_rank} node_ranks:{node_ranks[node_rank]}" ) # NOTE: there are different number of global mapped ranks on each node. @@ -1102,10 +1089,8 @@ def get_mapped_cluster_from_args_with_rank_mapping(args, device_mode): ), "ranks length should be equal to ips length." logger.debug( - "parsed from args: node_ips:{} node_ip:{} " - "node_rank:{} node_ranks:{}".format( - node_ips, node_ip, node_rank, node_ranks[node_rank] - ) + f"parsed from args: node_ips:{node_ips} node_ip:{node_ip} " + f"node_rank:{node_rank} node_ranks:{node_ranks[node_rank]}" ) # NOTE: there are different number of global mapped ranks on each node. 
@@ -1515,20 +1500,14 @@ def start_ps(self): for i in range(len(self.server_endpoints_ips)): if ip == self.server_endpoints_ips[i]: server = Trainer() - server.endpoint = "{}:{}".format( - ip, - self.server_endpoints_port[i], - ) + server.endpoint = f"{ip}:{self.server_endpoints_port[i]}" server.rank = server_rank server_rank += 1 pod.servers.append(server) for j in range(len(self.worker_endpoints_ips)): if ip == self.worker_endpoints_ips[j]: worker = Trainer() - worker.endpoint = "{}:{}".format( - ip, - self.worker_endpoints_port[j], - ) + worker.endpoint = f"{ip}:{self.worker_endpoints_port[j]}" worker.rank = worker_rank worker.stage = 1 worker_rank += 1 @@ -1536,9 +1515,8 @@ def start_ps(self): for m in range(len(self.coordinator_endpoints_ips)): if ip == self.coordinator_endpoints_ips[m]: coordinator = Trainer() - coordinator.endpoint = "{}:{}".format( - ip, - self.coordinator_endpoints_port[m], + coordinator.endpoint = ( + f"{ip}:{self.coordinator_endpoints_port[m]}" ) coordinator.rank = coordinator_rank coordinator.stage = 1 diff --git a/python/paddle/distributed/fleet/layers/mpu/mp_layers.py b/python/paddle/distributed/fleet/layers/mpu/mp_layers.py index c24062c1f392b..67b88cb52ab45 100644 --- a/python/paddle/distributed/fleet/layers/mpu/mp_layers.py +++ b/python/paddle/distributed/fleet/layers/mpu/mp_layers.py @@ -387,10 +387,8 @@ def __init__( self.gather_output = gather_output assert out_features % self.world_size == 0, ( - "Number of column of the weight for linear ({}) must be" - " divisible by model parallel size ({})".format( - out_features, self.world_size - ) + f"Number of column of the weight for linear ({out_features}) must be" + f" divisible by model parallel size ({self.world_size})" ) self.output_size_per_partition = out_features // self.world_size @@ -631,10 +629,8 @@ def __init__( paddle.in_dynamic_mode() ), "mp_async_allreduce, mp_skip_c_identity and mp_fused_linear_param_grad_add are only available under dygraph mode" assert in_features % self.world_size == 0, ( - "Number of row of the weight for linear ({}) must be" - " divisible by model parallel size ({})".format( - in_features, self.world_size - ) + f"Number of row of the weight for linear ({in_features}) must be" + f" divisible by model parallel size ({self.world_size})" ) self.input_size_per_partition = in_features // self.world_size diff --git a/python/paddle/distributed/fleet/layers/mpu/mp_ops.py b/python/paddle/distributed/fleet/layers/mpu/mp_ops.py index 27d8a32f0693f..5a726dd5ab141 100644 --- a/python/paddle/distributed/fleet/layers/mpu/mp_ops.py +++ b/python/paddle/distributed/fleet/layers/mpu/mp_ops.py @@ -429,10 +429,8 @@ def _c_softmax_with_cross_entropy( label_dims = len(list(label.shape)) if input_dims - 1 != label_dims and input_dims != label_dims: raise ValueError( - 'Expected input_dims - 1 = label_dims or input_dims == label_dims\ - (got input_dims{}, label_dims{})'.format( - input_dims, label_dims - ) + f'Expected input_dims - 1 = label_dims or input_dims == label_dims\ + (got input_dims{input_dims}, label_dims{label_dims})' ) if input_dims - 1 == label_dims: label = paddle.unsqueeze(label, axis=-1) @@ -842,9 +840,7 @@ def split( ] assert operation in supported_operations, ( "The operation for " - "paddle.distributed.split must be one of {}.".format( - supported_operations - ) + f"paddle.distributed.split must be one of {supported_operations}." 
) if in_dynamic_mode(): raise ValueError( @@ -872,9 +868,7 @@ def split( ) assert size[0] % num_partitions == 0, ( "The length of the vocabulary must be divisible by num_partitions " - "but received vocabulary={} num_partitions={}".format( - size[0], num_partitions - ) + f"but received vocabulary={size[0]} num_partitions={num_partitions}" ) per_part_size = size[0] // num_partitions @@ -893,10 +887,8 @@ def split( should_split = False if axis == 0: assert size[0] % num_partitions == 0, ( - "Number of rows of the weight for linear ({}) must be" - " divisible by num_partitions ({})".format( - size[0], num_partitions - ) + f"Number of rows of the weight for linear ({size[0]}) must be" + f" divisible by num_partitions ({num_partitions})" ) per_part_size = size[0] // num_partitions linear_size = (per_part_size, size[1]) @@ -905,17 +897,15 @@ def split( elif axis == 1: assert size[1] % num_partitions == 0, ( - "Number of column of the weight for linear ({}) must be" - " divisible by num_partitions ({})".format( - size[1], num_partitions - ) + f"Number of column of the weight for linear ({size[1]}) must be" + f" divisible by num_partitions ({num_partitions})" ) per_part_size = size[1] // num_partitions linear_size = (size[0], per_part_size) else: raise ValueError( "The value of axis must be 0 or 1, but the value " - "given is {}.".format(axis) + f"given is {axis}." ) linear_out = _parallel_linear( diff --git a/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/dygraph_sharding_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/dygraph_sharding_optimizer.py index 1190a03774129..071e1a07ce027 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/dygraph_sharding_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/dygraph_sharding_optimizer.py @@ -206,9 +206,7 @@ def _partition_parameters(self): numel = reduce(lambda x, y: x * y, param.shape, 1) assert ( numel > 0 - ), "param [{}] should larger than 0, but it is [{}]".format( - param.name, numel - ) + ), f"param [{param.name}] should larger than 0, but it is [{numel}]" sizes[rank] += numel return mapping @@ -341,9 +339,7 @@ def step(self): and param.regularizer is not None ): raise ValueError( - "param {} should not has the regularizer attribute".format( - param.name - ) + f"param {param.name} should not has the regularizer attribute" ) if param.stop_gradient: continue @@ -406,9 +402,7 @@ def _set_inner_opt_attr(self, attr_name, value): inner_opt_name = '_inner_opt' if not isinstance(attr_name, str): raise TypeError( - "attr_name should be str type, but is {}".format( - type(attr_name) - ) + f"attr_name should be str type, but is {type(attr_name)}" ) while hasattr(inner_opt, attr_name): setattr(inner_opt, attr_name, value) diff --git a/python/paddle/distributed/fleet/meta_optimizers/meta_optimizer_base.py b/python/paddle/distributed/fleet/meta_optimizers/meta_optimizer_base.py index 79bcc134656f5..45279379ec3fd 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/meta_optimizer_base.py +++ b/python/paddle/distributed/fleet/meta_optimizers/meta_optimizer_base.py @@ -53,16 +53,12 @@ def _can_update(self, optimizer): def _disable_strategy(self, dist_strategy): raise NotImplementedError( - "you should implement disable strategy in {}".format( - type(self).__name__ - ) + f"you should implement disable strategy in {type(self).__name__}" ) def _enable_strategy(self, dist_strategy, context=None): raise NotImplementedError( - "you should implement 
enable strategy in {}".format( - type(self).__name__ - ) + f"you should implement enable strategy in {type(self).__name__}" ) def apply_gradients(self, params_grads): diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/fp16_helper.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/fp16_helper.py index 5c8038c221558..0af5824ce3b6f 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/sharding/fp16_helper.py +++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/fp16_helper.py @@ -103,7 +103,7 @@ def prune_fp16(block, shard, reduced_grads_to_param, ring_ids): if param_name not in shard.global_params: raise ValueError( "Output 'X' of cast_op must be a grad of" - "model param, but {} is not a grad".format(output_name) + f"model param, but {output_name} is not a grad" ) if output_name in reduced_grads_to_param: continue @@ -131,7 +131,7 @@ def prune_fp16(block, shard, reduced_grads_to_param, ring_ids): if param_name not in shard.global_params: raise ValueError( "Input 'X' of check_finite_and_unscale must" - "be grads, but {} is not a grad".format(input_name) + f"be grads, but {input_name} is not a grad" ) if shard.has_param(param_name): reversed_x.append(input_name) diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py index 44a584ac6d0b2..f3301cbeb24d5 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py +++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py @@ -167,14 +167,14 @@ def check_allreduce_sum(block, shard, sharding_ring_id, dp_ring_id=-1): _status = dp_grads_status[var_name] if _status == -1: raise ValueError( - "{} is not generated, but you are" - "trying to all-reduce it".format(var_name) + f"{var_name} is not generated, but you are" + "trying to all-reduce it" ) if _status == 0: raise ValueError( "There should be a sync_calc op " - "after generate Var: {} and before the" - "c_allreduce_sum op".format(var_name) + f"after generate Var: {var_name} and before the" + "c_allreduce_sum op" ) assert _status == 1 if var_name in vars_status: @@ -212,7 +212,7 @@ def check_allreduce_sum(block, shard, sharding_ring_id, dp_ring_id=-1): if vars_status[input_name] != 3: raise ValueError( "There should be a sync_comm op " - "after allreduce the Var: {}".format(input_name) + f"after allreduce the Var: {input_name}" ) raise ValueError( "The reduce output grad [{}] should NOT be be used in Non-root rank.".format( @@ -224,13 +224,13 @@ def check_allreduce_sum(block, shard, sharding_ring_id, dp_ring_id=-1): if dp_grads_status[input_name] != 3: raise ValueError( "There should be a sync_comm op " - "after allreduce the Var: {}".format(input_name) + f"after allreduce the Var: {input_name}" ) else: if dp_grads_status[input_name] != 5: raise ValueError( "The grad in shard should be allreduce and sync" - "twice before usage {}".format(input_name) + f"twice before usage {input_name}" ) for output_name in op.desc.output_arg_names(): @@ -538,9 +538,7 @@ def insert_fused_reduce_ops( root_id = get_grad_device(var, shard) assert 0 <= root_id < nranks, ( "root_id should >=0 and < nranks, " - "but now nranks={}, the root_id of var={} is {}".format( - nranks, var, root_id - ) + f"but now nranks={nranks}, the root_id of var={var} is {root_id}" ) device_to_vars[root_id].append(var) @@ -621,9 +619,7 @@ def insert_reduce_ops( root_id = get_grad_device(grad_var, shard) assert ( root_id >= 0 - ), "root id should be a positive int, but now root 
id is {}".format( - root_id - ) + ), f"root id should be a positive int, but now root id is {root_id}" if rank is not None and rank == root_id: grad_in_this_device.append(var) block._insert_op_without_sync( @@ -660,9 +656,7 @@ def insert_fused_broadcast_param_ops( root_id = shard.device(var) assert 0 <= root_id < nranks, ( "root_id should >=0 and < nranks, " - "but now nranks={}, the root_id of var={} is {}".format( - nranks, var, root_id - ) + f"but now nranks={nranks}, the root_id of var={var} is {root_id}" ) device_to_vars[root_id].append(var) @@ -731,9 +725,7 @@ def insert_broadcast_param_ops( root_id = shard.device(param) assert ( root_id >= 0 - ), "root id should be a positive int, but now root id is {}".format( - root_id - ) + ), f"root id should be a positive int, but now root id is {root_id}" if rank is not None and rank == root_id: param_in_this_device.append(param) block._insert_op_without_sync( @@ -801,9 +793,7 @@ def fuse_opt_broadcast_param_ops( def get_grad_device(grad_name, shard): - assert "@GRAD" in grad_name, "[{}] should be a grad variable.".format( - grad_name - ) + assert "@GRAD" in grad_name, f"[{grad_name}] should be a grad variable." base_name = None # NOTE: mind the traversal order possible_suffixes = [ @@ -905,7 +895,7 @@ def insert_scale_loss_grad_ops(block, scale=1.0): if is_loss_grad_op(op): assert op.type == 'fill_constant', ( "loss_grad_op must be fill_constant op, " - "but this op is {}".format(op.type) + f"but this op is {op.type}" ) assert op.has_attr('value') loss_scale = float(op.attr('value')) diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/weight_decay_helper.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/weight_decay_helper.py index 0a841cf243d14..2ff259be18b79 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/sharding/weight_decay_helper.py +++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/weight_decay_helper.py @@ -33,7 +33,7 @@ def prune_weight_decay(self, block, shard): if OP_ROLE_VAR_KEY not in op.attr_names: raise ValueError( "The Weight Dacay op should hold op_role_var attribute" - "but the {} op does not hold op_role_var".format(op.type) + f"but the {op.type} op does not hold op_role_var" ) op_role_var = op.all_attrs()[OP_ROLE_VAR_KEY] if not shard.has_param(op_role_var[0]): diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py index be6aad3208d09..1ee99b10854b9 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py @@ -248,9 +248,7 @@ def _get_hybrid_dp_mode(self): self.scale_gradient = gradient_scale_configs['scale_gradient'] if gm_acc_step > 1: logger.info( - "Gradient merge in [{}], acc step = [{}]".format( - gm_mode, gm_acc_step - ) + f"Gradient merge in [{gm_mode}], acc step = [{gm_acc_step}]" ) optimizer_sharding = False @@ -865,9 +863,7 @@ def _split_program(self, block): ) assert ( input_name not in self._forward_remain_anchors - ), "segment anchor [{}] met twice !".format( - input_name - ) + ), f"segment anchor [{input_name}] met twice !" 
self._backward_remain_anchors.remove(input_name) self._forward_remain_anchors.append(input_name) elif int(op.attr('op_role')) == int(OpRole.Forward): @@ -1766,9 +1762,7 @@ def create_persistable_gradients_and_insert_merge_ops( for grad_name in grad_names: assert ( get_grad_device(grad_name, shard) == shard.worker_idx - ), "try to merge gradient not belong to current shard: [{}]".format( - grad_name - ) + ), f"try to merge gradient not belong to current shard: [{grad_name}]" persistable_grad_name = grad_name + '@GradiantMerge' assert ( grad_name not in self._grad2merged_grad diff --git a/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py b/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py index 8773f9d82ca8b..4222d80a4e374 100755 --- a/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py +++ b/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py @@ -120,9 +120,7 @@ def check_sanity(): assert part >= 0, f"part[{part}] should be greater than 0" assert ( part <= self.num_items - ), "part[{}] should be less than num_items[{}]".format( - part, self.num_items - ) + ), f"part[{part}] should be less than num_items[{self.num_items}]" check_sanity() @@ -391,10 +389,8 @@ def __init__( # construct default topology if world_size % num_stages != 0: raise ValueError( - "should provide correct num_stages({}) " - "which can be divided by world_size({})".format( - num_stages, world_size - ) + f"should provide correct num_stages({num_stages}) " + f"which can be divided by world_size({world_size})" ) dp_num = world_size // num_stages self._topo = fleet.CommunicateTopology( @@ -754,10 +750,8 @@ def forward(self, input, chunk_id=None): self._num_virtual_pipeline_stages > 1 ), "chunk_id is only valid when using virtual pipeline stage" assert chunk_id < len(self._model_chunks), ( - "The virtual pipeline only has {} chunks, " - "but received chunk_id {}.".format( - len(self._model_chunks), chunk_id - ) + f"The virtual pipeline only has {len(self._model_chunks)} chunks, " + f"but received chunk_id {chunk_id}." ) # Get the target model chunk. model_chunk = self._model_chunks[chunk_id] diff --git a/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py b/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py index d90185d36d466..a3e5b406be79e 100755 --- a/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py +++ b/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py @@ -250,9 +250,7 @@ def __init__(self, layers, hcg, strategy): self._compute_loss = True logger.info( - "Pipeline Info -- num_stages: {}, stage_id: {}".format( - self.num_stages, self.stage_id - ) + f"Pipeline Info -- num_stages: {self.num_stages}, stage_id: {self.stage_id}" ) if self.use_model_parallel: diff --git a/python/paddle/distributed/fleet/recompute/recompute_hybrid.py b/python/paddle/distributed/fleet/recompute/recompute_hybrid.py index bdbdc797b2b7b..4600b78702b75 100644 --- a/python/paddle/distributed/fleet/recompute/recompute_hybrid.py +++ b/python/paddle/distributed/fleet/recompute/recompute_hybrid.py @@ -112,9 +112,7 @@ def forward( or 'xpu:' in paddle.get_device() or cur_device.split(':')[0] in paddle.device.get_all_custom_device_type() - ), "Recompute with RNG is not support current device: {}.".format( - cur_device - ) + ), f"Recompute with RNG is not support current device: {cur_device}." 
# TODO support AMP tracer = framework._dygraph_tracer() diff --git a/python/paddle/distributed/fleet/runtime/parameter_server_runtime.py b/python/paddle/distributed/fleet/runtime/parameter_server_runtime.py index ee3bd60b46b9e..865571cfeca6f 100644 --- a/python/paddle/distributed/fleet/runtime/parameter_server_runtime.py +++ b/python/paddle/distributed/fleet/runtime/parameter_server_runtime.py @@ -111,9 +111,7 @@ def _in_varnames(var): var_path = os.path.join(dirname, origin_varname) if not os.path.exists(var_path): raise ValueError( - "SelectedRows var {} can not find at {}".format( - new_var.name, var_path - ) + f"SelectedRows var {new_var.name} can not find at {var_path}" ) if os.path.isfile(var_path): @@ -309,9 +307,7 @@ def _get_executor(self): ) if heter_worker_device_guard not in ["GPU", "XPU", "CPU"]: raise ValueError( - "Heter Worker Not Support Device {}".format( - heter_worker_device_guard - ) + f"Heter Worker Not Support Device {heter_worker_device_guard}" ) if self.role_maker._is_heter_worker(): if heter_worker_device_guard == "GPU": diff --git a/python/paddle/distributed/fleet/runtime/the_one_ps.py b/python/paddle/distributed/fleet/runtime/the_one_ps.py index 43440ba37fc3a..a14c337a4fad1 100644 --- a/python/paddle/distributed/fleet/runtime/the_one_ps.py +++ b/python/paddle/distributed/fleet/runtime/the_one_ps.py @@ -480,9 +480,7 @@ def to_string(self, indent): attrs += f"fetch_var_name: \"{str(self.fetch_var_name)}\" " attrs += f"startup_program_id: {str(self.startup_program_id)} " attrs += f"main_program_id: {str(self.main_program_id)} " - attrs += "tensor_table_class: \"{}\" ".format( - str(self.tensor_table_class) - ) + attrs += f"tensor_table_class: \"{str(self.tensor_table_class)}\" " attrs += "\n" return program_str.format( conv_indent(indent), attrs, conv_indent(indent) @@ -898,9 +896,7 @@ def _get_executor(self): heter_device_type = self.role_maker._heter_device_type().upper() if heter_device_type not in ["GPU", "XPU", "CPU"]: raise ValueError( - "Heter Worker Not Support Device {}".format( - heter_device_type - ) + f"Heter Worker Not Support Device {heter_device_type}" ) if heter_device_type == "GPU": executor = Executor( diff --git a/python/paddle/distributed/fleet/utils/fs.py b/python/paddle/distributed/fleet/utils/fs.py index 770cef9c551e6..11617981d9d4b 100644 --- a/python/paddle/distributed/fleet/utils/fs.py +++ b/python/paddle/distributed/fleet/utils/fs.py @@ -171,9 +171,7 @@ def mkdirs(self, fs_path): client.mkdirs("test_mkdirs") client.delete("test_mkdirs") """ - assert not os.path.isfile(fs_path), "{} is already a file".format( - fs_path - ) + assert not os.path.isfile(fs_path), f"{fs_path} is already a file" os.makedirs(fs_path, exist_ok=True) def rename(self, fs_src_path, fs_dst_path): @@ -401,9 +399,7 @@ def handler(*args, **kwargs): except ExecuteError as e: if time.time() - start >= time_out: raise FSTimeOut( - "args:{} timeout:{}".format( - args, time.time() - start - ) + f"args:{args} timeout:{time.time() - start}" ) time.sleep(inter) diff --git a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py index a5723f856e661..9c44fc49fff67 100644 --- a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py +++ b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py @@ -272,9 +272,9 @@ def _init_communication_group(self): dev_ids.append(cur_id) num_pp = len(dev_ids) num_pp = max(1, num_pp) - assert num_pp == self.num_pp, 'num_pp: {}, self.num_pp: 
{}'.format( - num_pp, self.num_pp - ) + assert ( + num_pp == self.num_pp + ), f'num_pp: {num_pp}, self.num_pp: {self.num_pp}' collective_helper = fleet.meta_optimizers.common.CollectiveHelper( self.role_maker, wait_port=False @@ -533,9 +533,7 @@ def _check_validation(self, block): ) device = op.attr(self._op_device_key) - assert device, "{} has no {} set.".format( - op.type, self._op_device_key - ) + assert device, f"{op.type} has no {self._op_device_key} set." if device.split(':')[1] == "all": continue diff --git a/python/paddle/distributed/fleet/utils/pp_parallel_adaptor.py b/python/paddle/distributed/fleet/utils/pp_parallel_adaptor.py index 1a8dd92fc8518..ad640a7200d0d 100644 --- a/python/paddle/distributed/fleet/utils/pp_parallel_adaptor.py +++ b/python/paddle/distributed/fleet/utils/pp_parallel_adaptor.py @@ -544,9 +544,9 @@ def parse_args(): if args.dst_pp is None: args.dst_pp = args.src_pp - assert args.src_mp == args.dst_mp, "src mp {} dst mp {}".format( - args.src_mp, args.dst_mp - ) + assert ( + args.src_mp == args.dst_mp + ), f"src mp {args.src_mp} dst mp {args.dst_mp}" assert args.method in [ 'peek_model', diff --git a/python/paddle/distributed/fleet/utils/sequence_parallel_utils.py b/python/paddle/distributed/fleet/utils/sequence_parallel_utils.py index ae5dec21b4e8f..1b492bb00f560 100644 --- a/python/paddle/distributed/fleet/utils/sequence_parallel_utils.py +++ b/python/paddle/distributed/fleet/utils/sequence_parallel_utils.py @@ -258,10 +258,8 @@ def __init__( self.gather_output = gather_output assert out_features % self.world_size == 0, ( - "Number of column of the weight for linear ({}) must be" - " divisible by model parallel size ({})".format( - out_features, self.world_size - ) + f"Number of column of the weight for linear ({out_features}) must be" + f" divisible by model parallel size ({self.world_size})" ) self.output_size_per_partition = out_features // self.world_size @@ -380,10 +378,8 @@ def __init__( self.is_mp = self.world_size > 1 assert in_features % self.world_size == 0, ( - "Number of row of the weight for linear ({}) must be" - " divisible by model parallel size ({})".format( - in_features, self.world_size - ) + f"Number of row of the weight for linear ({in_features}) must be" + f" divisible by model parallel size ({self.world_size})" ) self.input_size_per_partition = in_features // self.world_size diff --git a/python/paddle/distributed/fleet/utils/tensor_parallel_utils.py b/python/paddle/distributed/fleet/utils/tensor_parallel_utils.py index e370042927434..9ca0a7fdfc89f 100644 --- a/python/paddle/distributed/fleet/utils/tensor_parallel_utils.py +++ b/python/paddle/distributed/fleet/utils/tensor_parallel_utils.py @@ -268,9 +268,7 @@ def insert_synchronization( assert ( len(unsync_param_names) == 0 - ), "The following param is unsync by some error: {}".format( - unsync_param_names - ) + ), f"The following param is unsync by some error: {unsync_param_names}" def add_extra_synchronization( @@ -308,9 +306,7 @@ def add_extra_synchronization( logger.info("Constructing Extra Parameter Synchronization.") logger.info( - "Tensor Parallel Degree: {}, Synchronization mode: {}".format( - tp_degree, sync_mode - ) + f"Tensor Parallel Degree: {tp_degree}, Synchronization mode: {sync_mode}" ) # adopt for pipeline opt diff --git a/python/paddle/distributed/launch/context/device.py b/python/paddle/distributed/launch/context/device.py index 5126f687ccb0a..047812ea965e8 100644 --- a/python/paddle/distributed/launch/context/device.py +++ 
b/python/paddle/distributed/launch/context/device.py @@ -132,8 +132,8 @@ def get_custom_devices_count(device_type): custom_device_type = os.getenv('PADDLE_XCCL_BACKEND') dev._dtype = DeviceType.CUSTOM_DEVICE num = get_custom_devices_count(custom_device_type) - visible_devices_str = '{}_VISIBLE_DEVICES'.format( - custom_device_type.upper() + visible_devices_str = ( + f'{custom_device_type.upper()}_VISIBLE_DEVICES' ) if visible_devices_str in os.environ: visible_devices = os.getenv(visible_devices_str) diff --git a/python/paddle/distributed/launch/controllers/collective.py b/python/paddle/distributed/launch/controllers/collective.py index 35bd244bb2f89..13d8ef403504a 100644 --- a/python/paddle/distributed/launch/controllers/collective.py +++ b/python/paddle/distributed/launch/controllers/collective.py @@ -75,9 +75,7 @@ def _build_pod_with_tuner(self): "PADDLE_CURRENT_ENDPOINT": endpoint, "FLAGS_selected_gpus": "0", "PADDLE_AUTO_PARALLEL_STAGE": "tuner", - "PADDLE_GLOBAL_SIZE": "{}".format( - pod_replicas * int(self.ctx.args.nnodes) - ), + "PADDLE_GLOBAL_SIZE": f"{pod_replicas * int(self.ctx.args.nnodes)}", "PADDLE_LOCAL_SIZE": f"{pod_replicas}", } log_file = "tuner.log" diff --git a/python/paddle/distributed/launch/job/pod.py b/python/paddle/distributed/launch/job/pod.py index 85cf1fed34be4..30159482081d2 100644 --- a/python/paddle/distributed/launch/job/pod.py +++ b/python/paddle/distributed/launch/job/pod.py @@ -46,8 +46,8 @@ def __init__(self): super().__init__() def __str__(self): - return "Pod: {}, replicas {}, status {}".format( - self.name, self.replicas, self.status + return ( + f"Pod: {self.name}, replicas {self.replicas}, status {self.status}" ) def failed_container(self): diff --git a/python/paddle/distributed/launch/plugins/__init__.py b/python/paddle/distributed/launch/plugins/__init__.py index 23e58b0e65f79..158abb7a5d9b7 100644 --- a/python/paddle/distributed/launch/plugins/__init__.py +++ b/python/paddle/distributed/launch/plugins/__init__.py @@ -71,9 +71,7 @@ def test_mode(ctx): ctx.logger.info('Paddle Distributed Test begin...') if int(ctx.args.nnodes) < 2: ctx.args.nnodes = 2 - ctx.args.training_script = '{}/test.py'.format( - os.path.dirname(__file__) - ) + ctx.args.training_script = f'{os.path.dirname(__file__)}/test.py' enabled_plugins = [ diff --git a/python/paddle/distributed/parallel.py b/python/paddle/distributed/parallel.py index 843c9eb5d9c0e..3815d0f475fbe 100644 --- a/python/paddle/distributed/parallel.py +++ b/python/paddle/distributed/parallel.py @@ -700,8 +700,8 @@ def __init__(self): # imperative only support one gpu or xpu if self._device_type != "": - FLAGS_selected_custom_devices = 'FLAGS_selected_{}s'.format( - self._device_type + FLAGS_selected_custom_devices = ( + f'FLAGS_selected_{self._device_type}s' ) selected_custom_devices = os.getenv( FLAGS_selected_custom_devices, "0" @@ -1014,8 +1014,8 @@ def train(): ) if backend == "xccl": - FLAGS_selected_custom_devices = 'FLAGS_selected_{}s'.format( - parallel_env.device_type + FLAGS_selected_custom_devices = ( + f'FLAGS_selected_{parallel_env.device_type}s' ) _check_var_exists(FLAGS_selected_custom_devices) else: diff --git a/python/paddle/distributed/passes/auto_parallel_amp.py b/python/paddle/distributed/passes/auto_parallel_amp.py index 322adfb5da310..53bdca47c48a5 100644 --- a/python/paddle/distributed/passes/auto_parallel_amp.py +++ b/python/paddle/distributed/passes/auto_parallel_amp.py @@ -215,9 +215,7 @@ def build_state(self): fwd_op_id = self.grad_op_to_op_map[ op.desc.original_id() ] - assert 
fwd_op_id in self._op_fp16_dict, "{}".format( - str(op) - ) + assert fwd_op_id in self._op_fp16_dict, f"{str(op)}" self._op_fp16_dict[ op.desc.original_id() ] = self._is_fp16_op(fwd_op_id) @@ -390,17 +388,13 @@ def _cast_block(self, block): for in_var_name in op.input_arg_names: assert ( in_var.dtype == block.var(in_var_name).dtype - ), "{}, {}, {}".format( - in_var, block.var(in_var_name), str(op) - ) + ), f"{in_var}, {block.var(in_var_name)}, {str(op)}" out_var.desc.set_dtype(in_var.dtype) elif int(op.attr('op_role')) == 257: pass else: raise ValueError( - "'{}' op is not supported in the complete amp pass.".format( - op.type - ) + f"'{op.type}' op is not supported in the complete amp pass." ) idx += num_cast_ops + 1 block._sync_with_cpp() diff --git a/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py b/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py index acc99d562aac4..3cda24f1a0f64 100644 --- a/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py +++ b/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py @@ -217,7 +217,7 @@ def _scale_backward_initial_grad(self): if is_loss_grad_op(op): assert op.type == 'fill_constant', ( "loss_grad_op must be fill_constant op, " - "but this op is {}".format(op.type) + f"but this op is {op.type}" ) assert op.has_attr('value') loss_scale = float(op.attr('value')) @@ -498,9 +498,7 @@ def _update_program(self, grad_groups): allreduce_op = block.ops[group.allreduce_op_idx] assert ( allreduce_op.type == 'c_allreduce_sum' - ), "should found c_allreduce_sum op but found {}".format( - str(allreduce_op) - ) + ), f"should found c_allreduce_sum op but found {str(allreduce_op)}" allreduce_op_dist_attr = ( self.dist_context.get_op_dist_attr_for_program(allreduce_op) ) @@ -699,9 +697,7 @@ def summary(self, grad_groups=[]): fused_grads ) self._logger.debug( - "the following [{}] gradients are not fused: ".format( - len(individual_grads) - ) + f"the following [{len(individual_grads)}] gradients are not fused: " ) self._logger.debug(f"individual gradient {individual_grads}") @@ -764,9 +760,7 @@ def add(self, grad_var, ring_id, i): grad_op = self.ops[grad_op_idx] assert ( grad_var.name in grad_op.output_arg_names - ), "grad [{}] should be output of {}".format( - grad_var.name, str(grad_op) - ) + ), f"grad [{grad_var.name}] should be output of {str(grad_op)}" self.coalesce_op_idx = grad_op_idx def finalize(self): diff --git a/python/paddle/distributed/passes/auto_parallel_fp16.py b/python/paddle/distributed/passes/auto_parallel_fp16.py index 8f9927ae37c28..82475251ee516 100644 --- a/python/paddle/distributed/passes/auto_parallel_fp16.py +++ b/python/paddle/distributed/passes/auto_parallel_fp16.py @@ -368,9 +368,7 @@ def cast_block(self, block): for in_var_name in op.input_arg_names: assert ( in_var.dtype == block.var(in_var_name).dtype - ), "{}, {}, {}".format( - in_var, block.var(in_var_name), str(op) - ) + ), f"{in_var}, {block.var(in_var_name)}, {str(op)}" out_var.desc.set_dtype(in_var.dtype) idx += num_cast_ops + 1 @@ -479,9 +477,7 @@ def _insert_backward_cast_ops( out_var = block.var(out_var_name) if _keep_fp32_output(op, out_var.name): continue - assert out_var.dtype == dst_dtype, "{}, {}".format( - str(out_var), dst_dtype - ) + assert out_var.dtype == dst_dtype, f"{str(out_var)}, {dst_dtype}" for ( cast_name, @@ -495,9 +491,7 @@ def _insert_backward_cast_ops( if slot_name in op.input_names: assert src_name in op.input( slot_name - ), "var: {} not in op's {}. 
{}".format( - src_name, slot_name, str(op) - ) + ), f"var: {src_name} not in op's {slot_name}. {str(op)}" src_var_dist_attr = grad_op_attr.get_input_dist_attr(src_name) assert src_var_dist_attr is not None op._rename_input(src_name, cast_name) @@ -770,9 +764,7 @@ def _apply_single_impl(self, main_program, startup_program, context): else: raise NotImplementedError( - "target dtype [{}] is for amp o2 not supported yet.".format( - self.target_dtype - ) + f"target dtype [{self.target_dtype}] is for amp o2 not supported yet." ) global __target_dtype__ __target_dtype__ = __target_dtype diff --git a/python/paddle/distributed/passes/auto_parallel_grad_clip.py b/python/paddle/distributed/passes/auto_parallel_grad_clip.py index 3bee97a90dbc4..327b208518ee8 100644 --- a/python/paddle/distributed/passes/auto_parallel_grad_clip.py +++ b/python/paddle/distributed/passes/auto_parallel_grad_clip.py @@ -280,9 +280,7 @@ def _partition_parameters(self, params): numel = reduce(lambda x, y: x * y, param.shape, 1) assert ( numel > 0 - ), "param [{}] should larger than 0, but it is [{}]".format( - param.name, numel - ) + ), f"param [{param.name}] should larger than 0, but it is [{numel}]" sizes[rank] += numel return mapping diff --git a/python/paddle/distributed/passes/auto_parallel_pipeline.py b/python/paddle/distributed/passes/auto_parallel_pipeline.py index 9e2a06778854a..1b14560078fad 100644 --- a/python/paddle/distributed/passes/auto_parallel_pipeline.py +++ b/python/paddle/distributed/passes/auto_parallel_pipeline.py @@ -85,7 +85,7 @@ def _apply_single_impl(self, main_program, startup_program, context): else: raise ValueError( "Now only 'F-then-B', '1F1B' and 'stream' are supported." - "The given value is {}.".format(self._mode) + f"The given value is {self._mode}." ) def _insert_sync_ops_for_stream(self): diff --git a/python/paddle/distributed/passes/auto_parallel_sharding.py b/python/paddle/distributed/passes/auto_parallel_sharding.py index 41a4b357c7080..f7b211fdc4ba4 100644 --- a/python/paddle/distributed/passes/auto_parallel_sharding.py +++ b/python/paddle/distributed/passes/auto_parallel_sharding.py @@ -804,10 +804,7 @@ def _fuse_overlap_parameter_comm_stage_two(self, sharding_info): ) ) _logger.debug( - "Bucket[{}] parameters: {}.".format( - i, - [p.name for p in param_group.vars], - ) + f"Bucket[{i}] parameters: {[p.name for p in param_group.vars]}." ) broadcast_var_to_group_map[ @@ -1647,9 +1644,7 @@ def partition_by_greedy_even(params, group_size): numel = reduce(lambda x, y: x * y, param.shape, 1) assert ( numel > 0 - ), "param [{}] should larger than 0, but it is [{}]".format( - param.name, numel - ) + ), f"param [{param.name}] should larger than 0, but it is [{numel}]" sizes[rank] += numel return mapping @@ -1664,9 +1659,7 @@ def partition_parameters(params, group_size, algor="greedy_even"): _logger.info("Sharding Parameter Partition:") for k, v in rank_to_params.items(): _logger.info( - "Rank:{}, Parameter Size:{} MB.".format( - k, sum([get_var_size(var) for var in v]) - ) + f"Rank:{k}, Parameter Size:{sum([get_var_size(var) for var in v])} MB." 
) _logger.info(f"Params in this rank: {[var.name for var in v]}.") diff --git a/python/paddle/distributed/passes/pipeline_scheduler_pass.py b/python/paddle/distributed/passes/pipeline_scheduler_pass.py index a473e7b095eaf..7e7c69b04fb43 100644 --- a/python/paddle/distributed/passes/pipeline_scheduler_pass.py +++ b/python/paddle/distributed/passes/pipeline_scheduler_pass.py @@ -405,9 +405,7 @@ def apply_pass(main_program, startup_program, pass_name, pass_attr={}): assert pass_name in [ "FThenB", "1F1B", - ], "pipeline scheduler only support FThenB and 1F1B, but recieve {}".format( - pass_name - ) + ], f"pipeline scheduler only support FThenB and 1F1B, but recieve {pass_name}" if pass_name == "1F1B": # TODO(Ruibiao): Move FLAGS_1f1b_backward_forward_overlap and diff --git a/python/paddle/distributed/ps/coordinator.py b/python/paddle/distributed/ps/coordinator.py index f433b58ae20dd..6926775241546 100755 --- a/python/paddle/distributed/ps/coordinator.py +++ b/python/paddle/distributed/ps/coordinator.py @@ -88,9 +88,7 @@ def select(self): self.parse_from_string() for client_id in self.clients_info: logger.info( - "fl-ps > client {} info : {}".format( - client_id, self.clients_info[client_id] - ) + f"fl-ps > client {client_id} info : {self.clients_info[client_id]}" ) # ......... to implement ...... # fl_strategy_desc = the_one_ps_pb2.FLStrategy() @@ -253,9 +251,7 @@ def pull_fl_strategy(self): self._client_ptr.pull_fl_strategy() ) # block: wait for coordinator's strategy arrived logger.info( - "fl-ps > fl client recved fl_strategy(str):\n{}".format( - fl_strategy_str - ) + f"fl-ps > fl client recved fl_strategy(str):\n{fl_strategy_str}" ) fl_strategy_desc = the_one_ps_pb2.FLStrategy() text_format.Parse( diff --git a/python/paddle/distributed/ps/the_one_ps.py b/python/paddle/distributed/ps/the_one_ps.py index 968b15817ed7a..a32451985472a 100755 --- a/python/paddle/distributed/ps/the_one_ps.py +++ b/python/paddle/distributed/ps/the_one_ps.py @@ -73,11 +73,7 @@ def check_embedding_dim(accessor_proto, varname, program_id, context): for var in main_program.list_vars(): if var.name == varname: embedding_dim = var.shape[1] - print( - 'new var: {}, {}, {}'.format( - var, embedding_dim, accessor_proto.fea_dim - ) - ) + print(f'new var: {var}, {embedding_dim}, {accessor_proto.fea_dim}') break fea_dim = accessor_proto.fea_dim diff --git a/python/paddle/distributed/ps/utils/public.py b/python/paddle/distributed/ps/utils/public.py index 865de4c828308..43a1840584be6 100755 --- a/python/paddle/distributed/ps/utils/public.py +++ b/python/paddle/distributed/ps/utils/public.py @@ -651,9 +651,7 @@ def get_the_one_send_context(attrs, split_dense_table=False, ep_list=None): def find_heter_ops(program, default_device="cpu"): if default_device not in DEVICE_LIST: raise ValueError( - "Given device {} is not in device list {}".format( - default_device, DEVICE_LIST - ) + f"Given device {default_device} is not in device list {DEVICE_LIST}" ) def _is_heter_op(op, current_heter_device, default_device="cpu"): @@ -1153,12 +1151,12 @@ def get_communicate_var_info( input_var_reshape_name = [] if type == "forward": - block_input_var_name = "forward_joint_{}_{}@Heter".format( - block_index - 1, block_index + block_input_var_name = ( + f"forward_joint_{block_index - 1}_{block_index}@Heter" ) else: - block_input_var_name = "backward_joint_{}_{}@Heter".format( - block_index + 1, block_index + block_input_var_name = ( + f"backward_joint_{block_index + 1}_{block_index}@Heter" ) entrance_var_list.sort() diff --git 
a/python/paddle/distributed/utils/launch_utils.py b/python/paddle/distributed/utils/launch_utils.py index eb22fde7bc1e7..b06201dc89472 100644 --- a/python/paddle/distributed/utils/launch_utils.py +++ b/python/paddle/distributed/utils/launch_utils.py @@ -35,9 +35,7 @@ def get_cluster_from_args(args, selected_gpus): node_rank = node_ips.index(node_ip) logger.debug( - "parsed from args:node_ips:{} node_ip:{} node_rank:{}".format( - node_ips, node_ip, node_rank - ) + f"parsed from args:node_ips:{node_ips} node_ip:{node_ip} node_rank:{node_rank}" ) free_ports = None @@ -91,11 +89,9 @@ def get_gpus(selected_gpus): for x in selected_gpus.split(',') ] logger.info( - "Change selected_gpus into reletive values. --ips:{} " - "will change into relative_ips:{} according to your " - "CUDA_VISIBLE_DEVICES:{}".format( - selected_gpus, gpus, cuda_visible_devices_list - ) + f"Change selected_gpus into reletive values. --ips:{selected_gpus} " + f"will change into relative_ips:{gpus} according to your " + f"CUDA_VISIBLE_DEVICES:{cuda_visible_devices_list}" ) return gpus @@ -217,9 +213,7 @@ def __init__(self): self.rank = None def __str__(self): - return "gpu:{} endpoint:{} rank:{}".format( - self.gpus, self.endpoint, self.rank - ) + return f"gpu:{self.gpus} endpoint:{self.endpoint} rank:{self.rank}" def __eq__(self, t): if len(self.gpus) != len(t.gpus): diff --git a/python/paddle/distributed/utils/nccl_utils.py b/python/paddle/distributed/utils/nccl_utils.py index 2910bdd0a8d2e..16e445d54bb04 100644 --- a/python/paddle/distributed/utils/nccl_utils.py +++ b/python/paddle/distributed/utils/nccl_utils.py @@ -27,9 +27,7 @@ def get_nccl_version_str(ver): NCCL_MINOR_VERSION = int(ver // 100) NCCL_PATCH_VERSION = int(ver % 100) - return "{}.{}.{}".format( - NCCL_MAJOR_VERSION, NCCL_MINOR_VERSION, NCCL_PATCH_VERSION - ) + return f"{NCCL_MAJOR_VERSION}.{NCCL_MINOR_VERSION}.{NCCL_PATCH_VERSION}" def check_nccl_version_for_p2p(): diff --git a/python/paddle/fft.py b/python/paddle/fft.py index df9cc318830d4..9600f2159abf6 100644 --- a/python/paddle/fft.py +++ b/python/paddle/fft.py @@ -54,9 +54,7 @@ def _check_normalization(norm): if norm not in ['forward', 'backward', 'ortho']: raise ValueError( - "Unexpected norm: {}. Norm should be forward, backward or ortho".format( - norm - ) + f"Unexpected norm: {norm}. Norm should be forward, backward or ortho" ) @@ -79,7 +77,7 @@ def _check_fft_shape(x, s): if len(s) > ndim: raise ValueError( "Length of FFT argument s should not be larger than the rank of input. " - "Received s: {}, rank of x: {}".format(s, ndim) + f"Received s: {s}, rank of x: {ndim}" ) for size in s: if not isinstance(size, int) or size <= 0: @@ -92,9 +90,7 @@ def _check_fft_axis(x, axis): raise ValueError(f"Invalid FFT axis ({axis}), it shoule be an integer.") if axis < -ndim or axis >= ndim: raise ValueError( - "Invalid FFT axis ({}), it should be in range [-{}, {})".format( - axis, ndim, ndim - ) + f"Invalid FFT axis ({axis}), it should be in range [-{ndim}, {ndim})" ) @@ -102,14 +98,12 @@ def _check_fft_axes(x, axes): ndim = x.ndim if not isinstance(axes, Sequence): raise ValueError( - "Invalid FFT axes ({}), it should be a sequence of integers.".format( - axes - ) + f"Invalid FFT axes ({axes}), it should be a sequence of integers." ) if len(axes) > ndim: raise ValueError( "Length of fft axes should not be larger than the rank of input. 
" - "Received, len of axes: {}, rank of x: {}".format(len(axes), ndim) + f"Received, len of axes: {len(axes)}, rank of x: {ndim}" ) for axis in axes: if not isinstance(axis, int) or axis < -ndim or axis >= ndim: @@ -914,9 +908,7 @@ def fft2(x, s=None, axes=(-2, -1), norm="backward", name=None): if s is not None: if not isinstance(s, Sequence) or len(s) != 2: raise ValueError( - "Invalid FFT argument s ({}), it should be a sequence of 2 integers.".format( - s - ) + f"Invalid FFT argument s ({s}), it should be a sequence of 2 integers." ) if axes is not None: if not isinstance(axes, Sequence) or len(axes) != 2: @@ -984,9 +976,7 @@ def ifft2(x, s=None, axes=(-2, -1), norm="backward", name=None): if s is not None: if not isinstance(s, Sequence) or len(s) != 2: raise ValueError( - "Invalid FFT argument s ({}), it should be a sequence of 2 integers.".format( - s - ) + f"Invalid FFT argument s ({s}), it should be a sequence of 2 integers." ) if axes is not None: if not isinstance(axes, Sequence) or len(axes) != 2: @@ -1048,9 +1038,7 @@ def rfft2(x, s=None, axes=(-2, -1), norm="backward", name=None): if s is not None: if not isinstance(s, Sequence) or len(s) != 2: raise ValueError( - "Invalid FFT argument s ({}), it should be a sequence of 2 integers.".format( - s - ) + f"Invalid FFT argument s ({s}), it should be a sequence of 2 integers." ) if axes is not None: if not isinstance(axes, Sequence) or len(axes) != 2: @@ -1104,9 +1092,7 @@ def irfft2(x, s=None, axes=(-2, -1), norm="backward", name=None): if s is not None: if not isinstance(s, Sequence) or len(s) != 2: raise ValueError( - "Invalid FFT argument s ({}), it should be a sequence of 2 integers.".format( - s - ) + f"Invalid FFT argument s ({s}), it should be a sequence of 2 integers." ) if axes is not None: if not isinstance(axes, Sequence) or len(axes) != 2: @@ -1153,9 +1139,7 @@ def hfft2(x, s=None, axes=(-2, -1), norm="backward", name=None): if s is not None: if not isinstance(s, Sequence) or len(s) != 2: raise ValueError( - "Invalid FFT argument s ({}), it should be a sequence of 2 integers.".format( - s - ) + f"Invalid FFT argument s ({s}), it should be a sequence of 2 integers." ) if axes is not None: if not isinstance(axes, Sequence) or len(axes) != 2: @@ -1216,9 +1200,7 @@ def ihfft2(x, s=None, axes=(-2, -1), norm="backward", name=None): if s is not None: if not isinstance(s, Sequence) or len(s) != 2: raise ValueError( - "Invalid FFT argument s ({}), it should be a sequence of 2 integers.".format( - s - ) + f"Invalid FFT argument s ({s}), it should be a sequence of 2 integers." ) if axes is not None: if not isinstance(axes, Sequence) or len(axes) != 2: diff --git a/python/paddle/framework/io.py b/python/paddle/framework/io.py index b1401759c1aad..68971d85653d3 100644 --- a/python/paddle/framework/io.py +++ b/python/paddle/framework/io.py @@ -181,9 +181,9 @@ def _build_load_path_and_config(path, config): directory_format_exist = os.path.isdir(path) if prefix_format_exist and directory_format_exist: raise ValueError( - "The {}.pdmodel and {} directory exist at the same time, " + f"The {path}.pdmodel and {path} directory exist at the same time, " "don't know which one to load, please make sure that the specified target " - "of ``path`` is unique.".format(path, path) + "of ``path`` is unique." ) elif not prefix_format_exist and not directory_format_exist: error_msg = "The ``path`` (%s) to load model not exists." @@ -281,9 +281,7 @@ def _pickle_save(obj, f, protocol): # TODO(weixin):add support for BytesIO. 
if not isinstance(protocol, int): raise ValueError( - "The 'protocol' MUST be `int`, but received {}".format( - type(protocol) - ) + f"The 'protocol' MUST be `int`, but received {type(protocol)}" ) if protocol < 2 or protocol > 4: @@ -429,9 +427,7 @@ def _transformed_from_lodtensor(obj): def _to_LodTensor(ndarray): if not isinstance(ndarray, np.ndarray): raise TypeError( - 'Type of `ndarray` should be numpy.ndarray, but received {}.'.format( - type(ndarray) - ) + f'Type of `ndarray` should be numpy.ndarray, but received {type(ndarray)}.' ) t = core.LoDTensor() place = _current_expected_place() @@ -794,9 +790,7 @@ def save(obj, path, protocol=4, **configs): os.makedirs(dirname, exist_ok=True) elif not _is_memory_buffer(path): raise ValueError( - "only supports saving objects to file and `BytesIO`, but got {}".format( - type(path) - ) + f"only supports saving objects to file and `BytesIO`, but got {type(path)}" ) config = _parse_save_config(configs) @@ -846,9 +840,7 @@ def _legacy_save(obj, path, protocol=2): if not isinstance(protocol, int): raise ValueError( - "The 'protocol' MUST be `int`, but received {}".format( - type(protocol) - ) + f"The 'protocol' MUST be `int`, but received {type(protocol)}" ) if protocol < 2 or protocol > 4: @@ -1129,9 +1121,7 @@ def load(path, **configs): return program except: raise ValueError( - "`paddle.load` can not parse the file:{}.".format( - path - ) + f"`paddle.load` can not parse the file:{path}." ) else: diff --git a/python/paddle/framework/random.py b/python/paddle/framework/random.py index 8d72d0215e93c..ef1260302d658 100644 --- a/python/paddle/framework/random.py +++ b/python/paddle/framework/random.py @@ -107,9 +107,7 @@ def get_rng_state(device=None): ) else: raise ValueError( - "get_rng_state is not implemented for current device: {}".format( - place - ) + f"get_rng_state is not implemented for current device: {place}" ) return state_list @@ -203,9 +201,7 @@ def set_rng_state(state_list, device=None): core.default_cpu_generator().set_state(state_list[0]) else: raise ValueError( - "set_rng_state is not implemented for current device: {}".format( - place - ) + f"set_rng_state is not implemented for current device: {place}" ) diff --git a/python/paddle/hapi/callbacks.py b/python/paddle/hapi/callbacks.py index 0f56648841496..1de70114db7e4 100644 --- a/python/paddle/hapi/callbacks.py +++ b/python/paddle/hapi/callbacks.py @@ -1300,9 +1300,7 @@ def on_eval_end(self, logs=None): lr = self.model._optimizer._learning_rate if not isinstance(lr, float): warnings.warn( - 'Expected learning_rate be float, bug got {}.'.format( - type(lr) - ) + f'Expected learning_rate be float, bug got {type(lr)}.' 
) return except Exception as e: diff --git a/python/paddle/hapi/dynamic_flops.py b/python/paddle/hapi/dynamic_flops.py index bbd94bd8da8f1..fcae6e4120ac8 100644 --- a/python/paddle/hapi/dynamic_flops.py +++ b/python/paddle/hapi/dynamic_flops.py @@ -312,8 +312,6 @@ def add_hooks(m): if print_detail: table.print_table() print( - 'Total Flops: {} Total Params: {}'.format( - int(total_ops), int(total_params) - ) + f'Total Flops: {int(total_ops)} Total Params: {int(total_params)}' ) return int(total_ops) diff --git a/python/paddle/hapi/hub.py b/python/paddle/hapi/hub.py index ca4502e7a19cc..a9118eb1c6cd0 100644 --- a/python/paddle/hapi/hub.py +++ b/python/paddle/hapi/hub.py @@ -53,8 +53,8 @@ def _import_module(name, repo_dir): def _git_archive_link(repo_owner, repo_name, branch, source): if source == 'github': - return 'https://github.com/{}/{}/archive/{}.zip'.format( - repo_owner, repo_name, branch + return ( + f'https://github.com/{repo_owner}/{repo_name}/archive/{branch}.zip' ) elif source == 'gitee': return 'https://gitee.com/{}/{}/repository/archive/{}.zip'.format( @@ -199,9 +199,7 @@ def list(repo_dir, source='github', force_reload=False): """ if source not in ('github', 'gitee', 'local'): raise ValueError( - 'Unknown source: "{}". Allowed values: "github" | "gitee" | "local".'.format( - source - ) + f'Unknown source: "{source}". Allowed values: "github" | "gitee" | "local".' ) if source in ('github', 'gitee'): @@ -248,9 +246,7 @@ def help(repo_dir, model, source='github', force_reload=False): """ if source not in ('github', 'gitee', 'local'): raise ValueError( - 'Unknown source: "{}". Allowed values: "github" | "gitee" | "local".'.format( - source - ) + f'Unknown source: "{source}". Allowed values: "github" | "gitee" | "local".' ) if source in ('github', 'gitee'): @@ -293,9 +289,7 @@ def load(repo_dir, model, source='github', force_reload=False, **kwargs): """ if source not in ('github', 'gitee', 'local'): raise ValueError( - 'Unknown source: "{}". Allowed values: "github" | "gitee" | "local".'.format( - source - ) + f'Unknown source: "{source}". Allowed values: "github" | "gitee" | "local".' 
) if source in ('github', 'gitee'): diff --git a/python/paddle/hapi/model_summary.py b/python/paddle/hapi/model_summary.py index 267e938a6e298..df5791a5fd70d 100644 --- a/python/paddle/hapi/model_summary.py +++ b/python/paddle/hapi/model_summary.py @@ -256,10 +256,8 @@ def summary(net, input_size=None, dtypes=None, input=None): item = (item,) assert isinstance( item, (tuple, InputSpec) - ), 'When input_size is list, \ - expect item in input_size is a tuple or InputSpec, but got {}'.format( - type(item) - ) + ), f'When input_size is list, \ + expect item in input_size is a tuple or InputSpec, but got {type(item)}' if isinstance(item, InputSpec): _input_size.append(tuple(item.shape)) diff --git a/python/paddle/hapi/progressbar.py b/python/paddle/hapi/progressbar.py index c5b25a58c12e1..bb419e9b2a1ef 100644 --- a/python/paddle/hapi/progressbar.py +++ b/python/paddle/hapi/progressbar.py @@ -96,9 +96,9 @@ def convert_uint16_to_float(in_list): if time_per_unit >= 1 or time_per_unit == 0: fps = f' - {time_per_unit:.0f}s/{self.name}' elif time_per_unit >= 1e-3: - fps = ' - {:.0f}ms/{}'.format(time_per_unit * 1e3, self.name) + fps = f' - {time_per_unit * 1e3:.0f}ms/{self.name}' else: - fps = ' - {:.0f}us/{}'.format(time_per_unit * 1e6, self.name) + fps = f' - {time_per_unit * 1e6:.0f}us/{self.name}' info = '' if self._verbose == 1: diff --git a/python/paddle/incubate/asp/asp.py b/python/paddle/incubate/asp/asp.py index 671bc0251c1e8..041132047dc71 100644 --- a/python/paddle/incubate/asp/asp.py +++ b/python/paddle/incubate/asp/asp.py @@ -1002,9 +1002,9 @@ def set_state_dict(self, state_dict): ) for param_name, var in asp_info.mask_vars.items(): param_mask_name = ASPHelper._get_mask_name(param_name) - assert param_mask_name in state_dict, "The {} is not found.".format( - param_mask_name - ) + assert ( + param_mask_name in state_dict + ), f"The {param_mask_name} is not found." var.set_value(state_dict[param_mask_name]) asp_info.update_masks(param_name, var.numpy()) return self._optimizer.set_state_dict(state_dict) diff --git a/python/paddle/incubate/asp/supported_layer_list.py b/python/paddle/incubate/asp/supported_layer_list.py index b0d420fa36b03..0ebc6ea2d3128 100644 --- a/python/paddle/incubate/asp/supported_layer_list.py +++ b/python/paddle/incubate/asp/supported_layer_list.py @@ -105,11 +105,7 @@ def add_supported_layer(layer, pruning_func=None): elif issubclass(layer, paddle.nn.Layer): name = paddle.nn.layer.layers._convert_camel_to_snake(layer.__name__) else: - assert ( - "The type of layer should be string of Layer, but got {}!".format( - type(layer) - ) - ) + assert f"The type of layer should be string of Layer, but got {type(layer)}!" if pruning_func is None: pruning_func = _default_pruning _supported_layers_and_prune_func_map_lock.acquire() diff --git a/python/paddle/incubate/asp/utils.py b/python/paddle/incubate/asp/utils.py index 1028dcc4dae29..4ed8d7e74d56e 100644 --- a/python/paddle/incubate/asp/utils.py +++ b/python/paddle/incubate/asp/utils.py @@ -536,7 +536,7 @@ def create_mask(tensor, func_name=MaskAlgo.MASK_1D, n=2, m=4): assert isinstance(func_name, MaskAlgo), ( "func_name argumet of create_mask is only accepted as type MaskAlgo. 
" - "But got {}".format(type(func_name)) + f"But got {type(func_name)}" ) func = getattr(sys.modules[__name__], func_name.value, None) if len(shape) == 1: @@ -559,7 +559,7 @@ def create_mask(tensor, func_name=MaskAlgo.MASK_1D, n=2, m=4): else: raise ValueError( "The dimension of input tensor is not supported in create_mask, " - "Only dimension < 4 is supported but got {}".format(len(shape)) + f"Only dimension < 4 is supported but got {len(shape)}" ) mask = func(t, n=n, m=m) @@ -606,7 +606,7 @@ def check_sparsity(tensor, func_name=CheckMethod.CHECK_1D, n=2, m=4): assert type(func_name) == CheckMethod, ( "func_name argumet of check_sparsity is only accepted as type CheckMethod. " - "But got {}".format(type(func_name)) + f"But got {type(func_name)}" ) func = getattr(sys.modules[__name__], func_name.value, None) if len(shape) == 1: @@ -623,7 +623,7 @@ def check_sparsity(tensor, func_name=CheckMethod.CHECK_1D, n=2, m=4): else: raise ValueError( "The dimension of input tensor is not supported in create_mask, " - "Only dimension < 4 is supported but got {}".format(len(shape)) + f"Only dimension < 4 is supported but got {len(shape)}" ) return func(t, n=n, m=m) diff --git a/python/paddle/incubate/distributed/fleet/fleet_util.py b/python/paddle/incubate/distributed/fleet/fleet_util.py index 4b232ea8d2c87..afa26ab672314 100644 --- a/python/paddle/incubate/distributed/fleet/fleet_util.py +++ b/python/paddle/incubate/distributed/fleet/fleet_util.py @@ -480,8 +480,8 @@ def write_model_donefile( ) else: self.rank0_error( - "not write {} because {}/{} already " - "exists".format(donefile_name, day, pass_id) + f"not write {donefile_name} because {day}/{pass_id} already " + "exists" ) else: with open(donefile_name, "w") as f: @@ -598,8 +598,8 @@ def write_xbox_donefile( ) else: self.rank0_error( - "not write {} because {}/{} already " - "exists".format(donefile_name, day, pass_id) + f"not write {donefile_name} because {day}/{pass_id} already " + "exists" ) else: with open(donefile_name, "w") as f: @@ -1021,11 +1021,7 @@ def save_paddle_inference_model( if pass_id == "-1": dest = f"{output_path}/{day}/base/dnn_plugin/" else: - dest = "{}/{}/delta-{}/dnn_plugin/".format( - output_path, - day, - pass_id, - ) + dest = f"{output_path}/{day}/delta-{pass_id}/dnn_plugin/" if not client.is_exist(dest): client.makedirs(dest) @@ -1130,11 +1126,7 @@ def save_paddle_params( if pass_id == "-1": dest = f"{output_path}/{day}/base/dnn_plugin/" else: - dest = "{}/{}/delta-{}/dnn_plugin/".format( - output_path, - day, - pass_id, - ) + dest = f"{output_path}/{day}/delta-{pass_id}/dnn_plugin/" if not client.is_exist(dest): client.mkdirs(dest) client.upload(model_name, dest, multi_processes=5, overwrite=True) @@ -2048,8 +2040,8 @@ def write_model_donefile( ) else: self.rank0_error( - "not write {} because {}/{} already " - "exists".format(donefile_name, day, pass_id) + f"not write {donefile_name} because {day}/{pass_id} already " + "exists" ) else: with open(donefile_name, "w") as f: @@ -2165,8 +2157,8 @@ def write_xbox_donefile( ) else: self.rank0_info( - "not write {} because {}/{} already " - "exists".format(donefile_name, day, pass_id) + f"not write {donefile_name} because {day}/{pass_id} already " + "exists" ) else: with open(donefile_name, "w") as f: diff --git a/python/paddle/incubate/distributed/fleet/parameter_server/distribute_transpiler/distributed_strategy.py b/python/paddle/incubate/distributed/fleet/parameter_server/distribute_transpiler/distributed_strategy.py index b7158f41e9892..ddd795d7f5553 100644 --- 
a/python/paddle/incubate/distributed/fleet/parameter_server/distribute_transpiler/distributed_strategy.py +++ b/python/paddle/incubate/distributed/fleet/parameter_server/distribute_transpiler/distributed_strategy.py @@ -212,9 +212,7 @@ def set_program_config(self, config): setattr(self._program_config, key, config[key]) else: raise ValueError( - "DistributeTranspilerConfig doesn't have key: {}".format( - key - ) + f"DistributeTranspilerConfig doesn't have key: {key}" ) else: raise TypeError( diff --git a/python/paddle/incubate/distributed/fleet/parameter_server/ir/trainer_pass.py b/python/paddle/incubate/distributed/fleet/parameter_server/ir/trainer_pass.py index 9d9d35864063d..41d4e18ad577f 100644 --- a/python/paddle/incubate/distributed/fleet/parameter_server/ir/trainer_pass.py +++ b/python/paddle/incubate/distributed/fleet/parameter_server/ir/trainer_pass.py @@ -713,9 +713,7 @@ def delete_extra_optimizes_pass(program, config): def find_heter_ops(program, default_device="cpu"): if default_device not in DEVICE_LIST: raise ValueError( - "Given device {} is not in device list {}".format( - default_device, DEVICE_LIST - ) + f"Given device {default_device} is not in device list {DEVICE_LIST}" ) def _is_heter_op(op, current_heter_device, default_device="cpu"): @@ -1461,12 +1459,12 @@ def get_communicate_var_info( input_var_reshape_name = [] if type == "forward": - block_input_var_name = "forward_joint_{}_{}@Heter".format( - block_index - 1, block_index + block_input_var_name = ( + f"forward_joint_{block_index - 1}_{block_index}@Heter" ) else: - block_input_var_name = "backward_joint_{}_{}@Heter".format( - block_index + 1, block_index + block_input_var_name = ( + f"backward_joint_{block_index + 1}_{block_index}@Heter" ) entrance_var_list.sort() diff --git a/python/paddle/incubate/distributed/fleet/utils.py b/python/paddle/incubate/distributed/fleet/utils.py index 2be2ac7161071..17d0a4e35e693 100644 --- a/python/paddle/incubate/distributed/fleet/utils.py +++ b/python/paddle/incubate/distributed/fleet/utils.py @@ -398,9 +398,7 @@ def try_load_model_vars( ) else: logger.info( - "load feed vars from files: {}.".format( - feed_config.feeded_vars_filelist - ) + f"load feed vars from files: {feed_config.feeded_vars_filelist}." 
) feed_vars = [ inference_program.global_block().var( @@ -455,9 +453,7 @@ def check_saved_vars_try_dump( v for v in dump_prog.list_vars() if io_utils.is_persistable(v) ] logger.info( - "persistable vars in dump program: {}".format( - [v.name for v in saved_params] - ) + f"persistable vars in dump program: {[v.name for v in saved_params]}" ) check_not_expected_ops(dump_prog) diff --git a/python/paddle/incubate/nn/layer/fused_dropout_nd.py b/python/paddle/incubate/nn/layer/fused_dropout_nd.py index a820654fa9efc..ded171158fe3d 100644 --- a/python/paddle/incubate/nn/layer/fused_dropout_nd.py +++ b/python/paddle/incubate/nn/layer/fused_dropout_nd.py @@ -137,6 +137,4 @@ def forward(self, input): def extra_repr(self): name_str = f', name={self.name}' if self.name else '' - return 'p={}, axis={}, mode={}{}'.format( - self.p, self.axis, self.mode, name_str - ) + return f'p={self.p}, axis={self.axis}, mode={self.mode}{name_str}' diff --git a/python/paddle/incubate/nn/layer/fused_transformer.py b/python/paddle/incubate/nn/layer/fused_transformer.py index a09cc521a0e79..1626403e26b5a 100644 --- a/python/paddle/incubate/nn/layer/fused_transformer.py +++ b/python/paddle/incubate/nn/layer/fused_transformer.py @@ -125,7 +125,7 @@ def __init__( super().__init__() assert embed_dim > 0, ( "Expected embed_dim to be greater than 0, " - "but received {}".format(embed_dim) + f"but received {embed_dim}" ) self._dtype = self._helper.get_default_dtype() self._bias_attr = bias_attr @@ -303,12 +303,10 @@ def __init__( assert embed_dim > 0, ( "Expected embed_dim to be greater than 0, " - "but received {}".format(embed_dim) + f"but received {embed_dim}" ) - assert ( - num_heads > 0 - ), "Expected nhead to be greater than 0, " "but received {}".format( - num_heads + assert num_heads > 0, ( + "Expected nhead to be greater than 0, " f"but received {num_heads}" ) self.normalize_before = normalize_before @@ -587,9 +585,7 @@ def __init__( super().__init__() assert ( d_model > 0 - ), "Expected d_model to be greater than 0, but received {}".format( - d_model - ) + ), f"Expected d_model to be greater than 0, but received {d_model}" assert ( dim_feedforward > 0 ), "Expected dim_feedforward to be greater than 0, but received {}".format( @@ -809,19 +805,15 @@ def __init__( self._config.pop("__class__", None) # py3 super().__init__() - assert ( - d_model > 0 - ), "Expected d_model to be greater than 0, " "but received {}".format( - d_model + assert d_model > 0, ( + "Expected d_model to be greater than 0, " f"but received {d_model}" ) - assert ( - nhead > 0 - ), "Expected nhead to be greater than 0, " "but received {}".format( - nhead + assert nhead > 0, ( + "Expected nhead to be greater than 0, " f"but received {nhead}" ) assert dim_feedforward > 0, ( "Expected dim_feedforward to be greater than 0, " - "but received {}".format(dim_feedforward) + f"but received {dim_feedforward}" ) attn_dropout_rate = ( dropout_rate if attn_dropout_rate is None else attn_dropout_rate @@ -1225,12 +1217,10 @@ def __init__( assert embed_dim > 0, ( "Expected embed_dim to be greater than 0, " - "but received {}".format(embed_dim) + f"but received {embed_dim}" ) - assert ( - num_heads > 0 - ), "Expected nhead to be greater than 0, " "but received {}".format( - num_heads + assert num_heads > 0, ( + "Expected nhead to be greater than 0, " f"but received {num_heads}" ) assert ( dim_feedforward > 0 diff --git a/python/paddle/incubate/optimizer/functional/bfgs.py b/python/paddle/incubate/optimizer/functional/bfgs.py index bc244d9c19da1..93539d4c0c3a8 100644 
--- a/python/paddle/incubate/optimizer/functional/bfgs.py +++ b/python/paddle/incubate/optimizer/functional/bfgs.py @@ -125,9 +125,7 @@ def minimize_bfgs( if dtype not in ['float32', 'float64']: raise ValueError( - "The dtype must be 'float32' or 'float64', but the specified is {}.".format( - dtype - ) + f"The dtype must be 'float32' or 'float64', but the specified is {dtype}." ) op_name = 'minimize_bfgs' diff --git a/python/paddle/incubate/optimizer/functional/lbfgs.py b/python/paddle/incubate/optimizer/functional/lbfgs.py index fc482e4ca18b5..810f28b740145 100644 --- a/python/paddle/incubate/optimizer/functional/lbfgs.py +++ b/python/paddle/incubate/optimizer/functional/lbfgs.py @@ -126,9 +126,7 @@ def minimize_lbfgs( """ if dtype not in ['float32', 'float64']: raise ValueError( - "The dtype must be 'float32' or 'float64', but the specified is {}.".format( - dtype - ) + f"The dtype must be 'float32' or 'float64', but the specified is {dtype}." ) op_name = 'minimize_lbfgs' diff --git a/python/paddle/incubate/optimizer/gradient_merge.py b/python/paddle/incubate/optimizer/gradient_merge.py index c449f7405bbaf..022e4dc8fbb7b 100644 --- a/python/paddle/incubate/optimizer/gradient_merge.py +++ b/python/paddle/incubate/optimizer/gradient_merge.py @@ -154,9 +154,7 @@ def _remove_op_role_var(self, param, grad): op = grad.op assert self._is_the_backward_op( op - ), 'grad.op={} is not the backward op which produces the grad={}'.format( - op, grad.name - ) + ), f'grad.op={op} is not the backward op which produces the grad={grad.name}' block = grad.block var_attr = op.all_attrs()[op_maker.kOpRoleVarAttrName()] diff --git a/python/paddle/incubate/optimizer/pipeline.py b/python/paddle/incubate/optimizer/pipeline.py index d3ee9f261b2f8..6c0e80b1f5710 100644 --- a/python/paddle/incubate/optimizer/pipeline.py +++ b/python/paddle/incubate/optimizer/pipeline.py @@ -105,9 +105,7 @@ def __init__(self, optimizer, num_microbatches=1, start_cpu_core_id=0): raise ValueError( "The 'optimizer' parameter for " "PipelineOptimizer must be an instance of " - "{}, but the given type is {}.".format( - valid_optimizers, type(optimizer) - ) + f"{valid_optimizers}, but the given type is {type(optimizer)}." ) self._optimizer = optimizer @@ -511,9 +509,7 @@ def _add_op_device_attr_for_op(self, op, idx, block): post_op = self._find_post_op(idx, out_name) assert post_op.has_attr( 'op_device' - ), "{} has no op_device attr for var {}".format( - post_op.type, out_name - ) + ), f"{post_op.type} has no op_device attr for var {out_name}" device = post_op.attr(self._op_device_key) assert device, "The post op must have op_device set." op._set_attr(self._op_device_key, device) @@ -605,8 +601,8 @@ def _add_op_device_attr_for_op(self, op, idx, block): ] assert op.type in other_known_ops, ( "For other ops without " - "op_device set, they must be one of {}, but it " - "is {}".format(other_known_ops, op.type) + f"op_device set, they must be one of {other_known_ops}, but it " + f"is {op.type}" ) assert self._is_optimize_op(op) op._set_attr(self._op_device_key, f"{self._device}:all") @@ -670,15 +666,11 @@ def _check_validation(self, block): assert op.has_attr( self._op_device_key - ), "op ({}) has no {} attribute.".format( - op.type, self._op_device_key - ) + ), f"op ({op.type}) has no {self._op_device_key} attribute." device = op.attr(self._op_device_key) - assert ( - device - ), "op_device attribute for op " "{} has not been set.".format( - op.type + assert device, ( + "op_device attribute for op " f"{op.type} has not been set." 
) if device == f"{self._device}:all": continue @@ -982,7 +974,7 @@ def _insert_send_recv(cur_id, prev_id): else: raise ValueError( "Now only 'F-then-B' and '1F1B' are supported." - "The given value is {}.".format(self.schedule_mode) + f"The given value is {self.schedule_mode}." ) _insert_send_recv( @@ -1001,7 +993,7 @@ def _insert_loss_scale(self, block): if self._is_loss_grad_op(op): assert op.type == 'fill_constant', ( "loss_grad_op must be fill_constant op, " - "but this op is {}".format(op.type) + f"but this op is {op.type}" ) assert op.has_attr('value') loss_scale = float(op.attr('value')) @@ -1580,8 +1572,8 @@ def _process_persistable_vars_in_multi_sections( continue if var_name in op.desc.output_arg_names(): assert var_name not in write_info, ( - "two sections write the same var({}): second " - "op {}.".format(var_name, op) + f"two sections write the same var({var_name}): second " + f"op {op}." ) write_info[var_name] = prog break @@ -1820,7 +1812,7 @@ def _check_pipeline_persist_var(self, program): "However, some backward op don't need this var(NoNeedBufferVars), " "there will be no error at this time.\n" "So please check these persistable vars which changed in " - "forward and used in backward:\n{}".format(used_in_backward) + f"forward and used in backward:\n{used_in_backward}" ) def minimize( diff --git a/python/paddle/incubate/optimizer/recompute.py b/python/paddle/incubate/optimizer/recompute.py index 8e16bf27a1b53..9cbd8894f1889 100644 --- a/python/paddle/incubate/optimizer/recompute.py +++ b/python/paddle/incubate/optimizer/recompute.py @@ -291,9 +291,7 @@ def _insert_async_memcpy_op( def _insert_fetch_op(self, idx, varname): assert ( varname in self.checkpoint_name2pinned_name - ), "Try to fetch {} from Pinned Memory, but it is NOT a checkpoint".format( - varname - ) + ), f"Try to fetch {varname} from Pinned Memory, but it is NOT a checkpoint" pinned_varname = self.checkpoint_name2pinned_name[varname] fetch_varname = self.checkpoint_name2fetch_name[varname] @@ -302,9 +300,7 @@ def _insert_fetch_op(self, idx, varname): def _insert_offload_op(self, idx, varname): assert ( varname in self.checkpoint_name2pinned_name - ), "Try to offload {} to Pinned Memory, but it is NOT a checkpoint".format( - varname - ) + ), f"Try to offload {varname} to Pinned Memory, but it is NOT a checkpoint" pinned_varname = self.checkpoint_name2pinned_name[varname] self._insert_async_memcpy_op(idx, varname, pinned_varname, 0, 2) @@ -399,16 +395,12 @@ def _parse_backward(self): self.checkpoint_usage_count[input_var] += 1 else: raise ValueError( - "use checkpoint [{}] before fetch in BW".format( - input_var - ) + f"use checkpoint [{input_var}] before fetch in BW" ) assert ( len(self.un_fetch_checkpoint_names) == 0 - ), "{} checkpoints have NOT been Recorded".format( - self.un_fetch_checkpoint_names - ) + ), f"{self.un_fetch_checkpoint_names} checkpoints have NOT been Recorded" def _update_backward(self): if len(self.idx2insertions) == 0: @@ -551,9 +543,7 @@ def _parse_forward(self): assert ( len(self.un_offload_checkpoint_names) == 0 - ), "{} checkpoints have NOT been Recorded".format( - self.un_fetch_checkpoint_names - ) + ), f"{self.un_fetch_checkpoint_names} checkpoints have NOT been Recorded" assert len(self.synced_checkpoints) == len( need_offload_checkpoint_names ), "{} checkpoints have NOT been Recorded".format( diff --git a/python/paddle/incubate/passes/ir.py b/python/paddle/incubate/passes/ir.py index 8b9d9944d9bee..1b960855ef46c 100644 --- a/python/paddle/incubate/passes/ir.py +++ 
b/python/paddle/incubate/passes/ir.py @@ -287,9 +287,7 @@ def mapped_op(pattern_ops): ops = [o for o in pattern_ops if o._type == op] if len(ops) <= index: raise ValueError( - "Index '{}' of operator '{}' is incorrect.".format( - index, op - ) + f"Index '{index}' of operator '{op}' is incorrect." ) return PassDesc.AttrHelper( ops[index], name, element_index=element_index @@ -336,12 +334,7 @@ def _to_readable_code(self, skip_op_callstack=True): attrs_str += ", ".join([f"{k}={v}" for k, v in self._attrs.items()]) attrs_str += "}" - op_str = "{outputs} = {op_type}(inputs={inputs}, {attrs})".format( - outputs=outputs_str, - op_type=self._type, - inputs=inputs_str, - attrs=attrs_str, - ) + op_str = f"{outputs_str} = {self._type}(inputs={inputs_str}, {attrs_str})" return op_str def __init__(self, type=None): @@ -400,9 +393,7 @@ def Init(self): self._proto = OpProtoHolder.instance().op_proto_map.get(self._type) if self._proto is None: raise AttributeError( - "type object 'OpHelper' has no attribute '{}'".format( - self._type - ) + f"type object 'OpHelper' has no attribute '{self._type}'" ) self._index = len(block.ops) self._desc = block.desc.append_op() @@ -429,9 +420,7 @@ def Output(self, name): output = self._outputs.get(name) if output is None: raise ValueError( - "Operator '{}' does not have output named '{}'.".format( - self._type, name - ) + f"Operator '{self._type}' does not have output named '{name}'." ) return output diff --git a/python/paddle/io/dataloader/batch_sampler.py b/python/paddle/io/dataloader/batch_sampler.py index 78c93151a390d..20a9bb9a00da4 100644 --- a/python/paddle/io/dataloader/batch_sampler.py +++ b/python/paddle/io/dataloader/batch_sampler.py @@ -112,9 +112,7 @@ def __init__( ), "either dataset or sampler should be set" assert isinstance( sampler, Sampler - ), "sampler should be a paddle.io.Sampler, but got {}".format( - type(sampler) - ) + ), f"sampler should be a paddle.io.Sampler, but got {type(sampler)}" assert not shuffle, "shuffle should be False when sampler is set" self.sampler = sampler else: @@ -124,9 +122,7 @@ def __init__( assert sampler is None, "should not set both dataset and sampler" assert isinstance( shuffle, bool - ), "shuffle should be a boolean value, but got {}".format( - type(shuffle) - ) + ), f"shuffle should be a boolean value, but got {type(shuffle)}" if shuffle: self.sampler = RandomSampler(dataset) else: @@ -134,15 +130,11 @@ def __init__( assert ( isinstance(batch_size, int) and batch_size > 0 - ), "batch_size should be a positive integer, but got {}".format( - batch_size - ) + ), f"batch_size should be a positive integer, but got {batch_size}" self.batch_size = batch_size assert isinstance( drop_last, bool - ), "drop_last should be a boolean value, but got {}".format( - type(drop_last) - ) + ), f"drop_last should be a boolean value, but got {type(drop_last)}" self.drop_last = drop_last def __iter__(self): diff --git a/python/paddle/io/dataloader/collate.py b/python/paddle/io/dataloader/collate.py index 141624668f09b..cf3d3be5e847f 100644 --- a/python/paddle/io/dataloader/collate.py +++ b/python/paddle/io/dataloader/collate.py @@ -76,7 +76,7 @@ def default_collate_fn(batch): raise TypeError( "batch data con only contains: tensor, numpy.ndarray, " - "dict, list, number, but got {}".format(type(sample)) + f"dict, list, number, but got {type(sample)}" ) diff --git a/python/paddle/io/dataloader/dataloader_iter.py b/python/paddle/io/dataloader/dataloader_iter.py index 58a5c07139af5..ed64f5da3d9e9 100644 --- 
a/python/paddle/io/dataloader/dataloader_iter.py +++ b/python/paddle/io/dataloader/dataloader_iter.py @@ -362,10 +362,9 @@ def __init__(self, loader): self._persistent_workers = loader._persistent_workers self._resume_worker_cnt = 0 - assert ( - self._num_workers > 0 - ), "Multi-process DataLoader " "invalid num_workers({})".format( - self._num_workers + assert self._num_workers > 0, ( + "Multi-process DataLoader " + f"invalid num_workers({self._num_workers})" ) # subprocess wrokers' result queue @@ -706,8 +705,8 @@ def _get_data(self): self._exit_thread_unexpectedly() pids = ', '.join(str(w.pid) for w in failed_workers) raise RuntimeError( - "DataLoader {} workers exit unexpectedly, " - "pids: {}".format(len(failed_workers), pids) + f"DataLoader {len(failed_workers)} workers exit unexpectedly, " + f"pids: {pids}" ) # get(timeout) will call _poll(timeout) and may raise IOError @@ -717,8 +716,8 @@ def _get_data(self): self._exit_thread_unexpectedly() logging.error( - "DataLoader reader thread failed({}) to read data from " - "workers' result queue.".format(e) + f"DataLoader reader thread failed({e}) to read data from " + "workers' result queue." ) raise e else: diff --git a/python/paddle/io/dataloader/flat.py b/python/paddle/io/dataloader/flat.py index 87c35e6dedd38..36b899e3f55c2 100644 --- a/python/paddle/io/dataloader/flat.py +++ b/python/paddle/io/dataloader/flat.py @@ -143,8 +143,8 @@ def _restore(structure, field_idx): # sample only contains single fields if isinstance(structure, (str, bytes)): - assert structure == '{}{}'.format( - FIELD_PREFIX, 0 + assert ( + structure == f'{FIELD_PREFIX}{0}' ), f"invalid structure: {structure}" return flat_batch[0] field_idx = _restore(structure, 0) diff --git a/python/paddle/io/dataloader/sampler.py b/python/paddle/io/dataloader/sampler.py index d26316ecc0eb7..44bc545f777cd 100644 --- a/python/paddle/io/dataloader/sampler.py +++ b/python/paddle/io/dataloader/sampler.py @@ -208,7 +208,7 @@ def __init__( if not isinstance(self.replacement, bool): raise TypeError( "expect boolean value for replacement, but got " - "replacement={}".format(self.replacement) + f"replacement={self.replacement}" ) if self._num_samples is not None and not replacement: @@ -219,7 +219,7 @@ def __init__( if not isinstance(self.num_samples, int) or self.num_samples <= 0: raise ValueError( "num_samples should be a positive integer, " - "but got num_samples={}".format(self.num_samples) + f"but got num_samples={self.num_samples}" ) @property diff --git a/python/paddle/io/dataloader/worker.py b/python/paddle/io/dataloader/worker.py index 814dc667a7cf3..8829b6ee13d5c 100644 --- a/python/paddle/io/dataloader/worker.py +++ b/python/paddle/io/dataloader/worker.py @@ -166,9 +166,7 @@ def __init__(self, **kwargs): def __setattr__(self, key, val): if self.__initialized: raise RuntimeError( - "Cannot assign attributes to {} objects".format( - self.__class__.__name__ - ) + f"Cannot assign attributes to {self.__class__.__name__} objects" ) return super().__setattr__(key, val) diff --git a/python/paddle/io/reader.py b/python/paddle/io/reader.py index ca9a9eabecf87..fc8446b4f4e42 100644 --- a/python/paddle/io/reader.py +++ b/python/paddle/io/reader.py @@ -446,9 +446,7 @@ def __init__( self.dataset_kind = _DatasetKind.ITER if shuffle: raise ValueError( - "IterableDataset not support shuffle, but got shuffle={}".format( - shuffle - ) + f"IterableDataset not support shuffle, but got shuffle={shuffle}" ) if batch_sampler is not None: raise ValueError( diff --git a/python/paddle/jit/api.py 
b/python/paddle/jit/api.py index 5799f740d147f..a0ca693ccfcd0 100644 --- a/python/paddle/jit/api.py +++ b/python/paddle/jit/api.py @@ -661,9 +661,9 @@ def _build_load_path_and_config(path, config): directory_format_exist = os.path.isdir(path) if prefix_format_exist and directory_format_exist: raise ValueError( - "The {}.pdmodel and {} directory exist at the same time, " + f"The {path}.pdmodel and {path} directory exist at the same time, " "don't know which one to load, please make sure that the specified target " - "of ``path`` is unique.".format(path, path) + "of ``path`` is unique." ) elif not prefix_format_exist and not directory_format_exist: raise ValueError( diff --git a/python/paddle/jit/dy2static/base_transformer.py b/python/paddle/jit/dy2static/base_transformer.py index e3e2dc7b39941..7d61b3a7417b7 100644 --- a/python/paddle/jit/dy2static/base_transformer.py +++ b/python/paddle/jit/dy2static/base_transformer.py @@ -390,8 +390,8 @@ def _build_index_init_node(self): index_init_value_str = '0' index_init_var_name = self.iter_idx_name - index_init_node_source_str = "{target} = {value}".format( - target=index_init_var_name, value=index_init_value_str + index_init_node_source_str = ( + f"{index_init_var_name} = {index_init_value_str}" ) index_init_node = gast.parse(index_init_node_source_str).body[0] @@ -456,9 +456,7 @@ def _build_enum_init_node(self): else: init_value_str = '0' - enum_init_node_source_str = "{} = {}".format( - self.enum_idx_name, init_value_str - ) + enum_init_node_source_str = f"{self.enum_idx_name} = {init_value_str}" enum_init_node = gast.parse(enum_init_node_source_str).body[0] return enum_init_node diff --git a/python/paddle/jit/dy2static/basic_api_transformer.py b/python/paddle/jit/dy2static/basic_api_transformer.py index 40c5a5f511bde..64dfa67b6cfe6 100644 --- a/python/paddle/jit/dy2static/basic_api_transformer.py +++ b/python/paddle/jit/dy2static/basic_api_transformer.py @@ -208,9 +208,7 @@ def visit_Attribute(self, node): value = node.value node = ( gast.parse( - "_jst.Attr({}, \"{}\")".format( - utils.ast_to_source_code(value).strip(), attr - ) + f"_jst.Attr({utils.ast_to_source_code(value).strip()}, \"{attr}\")" ) .body[0] .value diff --git a/python/paddle/jit/dy2static/convert_operators.py b/python/paddle/jit/dy2static/convert_operators.py index 1082897ed8520..47618392175d9 100644 --- a/python/paddle/jit/dy2static/convert_operators.py +++ b/python/paddle/jit/dy2static/convert_operators.py @@ -570,7 +570,7 @@ def convert_zip(*args): if isinstance(arg, Variable) and arg.shape[0] == -1: raise RuntimeError( "Not support zip(tensor, ...) 
when tensor.shape[0] == -1, " - "but found args[{}].shape[0] == -1 in 'zip'".format(str(i)) + f"but found args[{str(i)}].shape[0] == -1 in 'zip'" ) return zip(*args) diff --git a/python/paddle/jit/dy2static/decorator_transformer.py b/python/paddle/jit/dy2static/decorator_transformer.py index a61c25dd6082e..3de950d0478aa 100644 --- a/python/paddle/jit/dy2static/decorator_transformer.py +++ b/python/paddle/jit/dy2static/decorator_transformer.py @@ -82,9 +82,7 @@ def visit_FunctionDef(self, node): # match case like: # @a.d.g.deco re_tmp = re.match( - r'({module})*({name})$'.format( - name=RE_PYNAME, module=RE_PYMODULE - ), + fr'({RE_PYMODULE})*({RE_PYNAME})$', deco_full_name, ) deco_name = re_tmp.group(2) diff --git a/python/paddle/jit/dy2static/error.py b/python/paddle/jit/dy2static/error.py index 5cad201d9ebf1..96124f1369087 100644 --- a/python/paddle/jit/dy2static/error.py +++ b/python/paddle/jit/dy2static/error.py @@ -203,9 +203,7 @@ def numpy_api_check(self, format_exception, error_line): func_str = None for frame in tb: searched_name = re.search( - r'({module})*{name}'.format( - module=RE_PYMODULE, name=frame.name - ), + fr'({RE_PYMODULE})*{frame.name}', error_line, ) if searched_name: @@ -339,9 +337,7 @@ def _create_revise_suggestion(self, bottom_error_message): for suggestion in self.suggestion_dict[keywords]: suggestion_msg = ( ' ' * BLANK_COUNT_BEFORE_FILE_STR * 2 - + '{}. {}'.format( - str(len(revise_suggestions) - 1), suggestion - ) + + f'{str(len(revise_suggestions) - 1)}. {suggestion}' ) revise_suggestions.append(suggestion_msg) return revise_suggestions if len(revise_suggestions) > 2 else [] diff --git a/python/paddle/jit/dy2static/function_spec.py b/python/paddle/jit/dy2static/function_spec.py index e2966e4097d86..ec835ee6e9540 100644 --- a/python/paddle/jit/dy2static/function_spec.py +++ b/python/paddle/jit/dy2static/function_spec.py @@ -395,9 +395,7 @@ def convert_to_input_spec(inputs, input_spec): def check_type_and_len(input, spec, check_length=False): if type(input) is not type(spec): raise TypeError( - 'type(input) should be {}, but received {}.'.format( - type(spec), type(input) - ) + f'type(input) should be {type(spec)}, but received {type(input)}.' ) if check_length and len(input) < len(spec): raise ValueError( @@ -444,9 +442,7 @@ def check_type_and_len(input, spec, check_length=False): real_spec = _replace_value_with_input_spec([inputs])[0] if not isinstance(real_spec, paddle.static.InputSpec): raise RuntimeError( - "Give input spec into a non-tensorable arguments `{}`.".format( - inputs - ) + f"Give input spec into a non-tensorable arguments `{inputs}`." ) real_spec.name = input_spec.name if spec_greater(input_spec, real_spec): diff --git a/python/paddle/jit/dy2static/logical_transformer.py b/python/paddle/jit/dy2static/logical_transformer.py index 90002c6e4bd0d..c2719d2c177f1 100644 --- a/python/paddle/jit/dy2static/logical_transformer.py +++ b/python/paddle/jit/dy2static/logical_transformer.py @@ -85,9 +85,7 @@ def _create_bool_op_node(self, nodes, api_type): ''' assert ( len(nodes) > 1 - ), "The length of BoolOp should be at least 2, but received {}.".format( - len(nodes) - ) + ), f"The length of BoolOp should be at least 2, but received {len(nodes)}." if len(nodes) > 2: # Creates logic_and/logic_or node recursively. 
pre_logic_node = self._create_bool_op_node(nodes[:2], api_type) @@ -98,9 +96,7 @@ def _create_bool_op_node(self, nodes, api_type): nodes = [pre_logic_node] + [post_logic_node] args = [ast_to_source_code(child) for child in nodes] - new_node_str = "_jst.{}(lambda:{}, lambda:{})".format( - api_type, args[0], args[1] - ) + new_node_str = f"_jst.{api_type}(lambda:{args[0]}, lambda:{args[1]})" # NOTE: gast.parse return Module(body=[expr(...)]) new_node = gast.parse(new_node_str).body[0].value return new_node diff --git a/python/paddle/jit/dy2static/origin_info.py b/python/paddle/jit/dy2static/origin_info.py index d6fee112ded91..d23d8a58431a8 100644 --- a/python/paddle/jit/dy2static/origin_info.py +++ b/python/paddle/jit/dy2static/origin_info.py @@ -41,9 +41,7 @@ def __init__(self, filepath, lineno, col_offset=None): self.col_offset = col_offset def __str__(self): - return "location: {}:{}:{}".format( - self.filepath, self.lineno, self.col_offset - ) + return f"location: {self.filepath}:{self.lineno}:{self.col_offset}" @property def line_location(self): @@ -311,9 +309,7 @@ def get_new_op_callstack(callstack): if dygraph_func_info: filepath, lineno, funcname, code = dygraph_func_info.as_frame() - callstack[i] = ' File "{}", line {}, in {}'.format( - filepath, lineno, funcname - ) + callstack[i] = f' File "{filepath}", line {lineno}, in {funcname}' callstack[i + 1] = f' {code}' return callstack diff --git a/python/paddle/jit/dy2static/utils.py b/python/paddle/jit/dy2static/utils.py index 1bdf46629dfd0..013fff9178e7c 100644 --- a/python/paddle/jit/dy2static/utils.py +++ b/python/paddle/jit/dy2static/utils.py @@ -333,8 +333,8 @@ def to_static_api(dygraph_class): return dygraph_class_to_static_api[dygraph_class] else: raise NotImplementedError( - "Paddle dygraph API {} cannot be converted " - "to static graph at present.".format(dygraph_class) + f"Paddle dygraph API {dygraph_class} cannot be converted " + "to static graph at present." ) @@ -1303,12 +1303,10 @@ def get_args_0(): """ def empty_node(): - func_def = """ - def {func_name}(): + func_def = f""" + def {unique_name.generate(GET_ARGS_FUNC_PREFIX)}(): return - """.format( - func_name=unique_name.generate(GET_ARGS_FUNC_PREFIX) - ) + """ return gast.parse(textwrap.dedent(func_def)).body[0] assert isinstance(names, (list, tuple)) @@ -1342,12 +1340,10 @@ def set_args_0(__args): """ def empty_node(): - func_def = """ - def {func_name}({args}): + func_def = f""" + def {unique_name.generate(SET_ARGS_FUNC_PREFIX)}({ARGS_NAME}): pass - """.format( - func_name=unique_name.generate(SET_ARGS_FUNC_PREFIX), args=ARGS_NAME - ) + """ return gast.parse(textwrap.dedent(func_def)).body[0] assert isinstance(names, (list, tuple)) @@ -1416,9 +1412,7 @@ def get(self, names): for n in names: assert ( n in self.name2id - ), "the name `{}` not in name union set`{}`.".format( - n, self.name2id.keys() - ) + ), f"the name `{n}` not in name union set`{self.name2id.keys()}`." return tuple(vars[self.name2id[n]] for n in names) def set(self, names, values): @@ -1432,9 +1426,7 @@ def set(self, names, values): for n in names: assert ( n in self.name2id - ), "the name `{}` not in name union set`{}`.".format( - n, self.name2id.keys() - ) + ), f"the name `{n}` not in name union set`{self.name2id.keys()}`." 
vars = list(vars) indices = [self.name2id[n] for n in names] for i, v in zip(indices, values): diff --git a/python/paddle/jit/dy2static/variable_trans_func.py b/python/paddle/jit/dy2static/variable_trans_func.py index ee358d57ee019..b32001dd28f7b 100644 --- a/python/paddle/jit/dy2static/variable_trans_func.py +++ b/python/paddle/jit/dy2static/variable_trans_func.py @@ -29,20 +29,14 @@ def create_undefined_var(name): def create_fill_constant_node(name, value=0): func_code = f"{name} = paddle.full(shape=[1], " if isinstance(value, bool): - func_code += "dtype='bool', fill_value={}, name='{}')".format( - value, name - ) + func_code += f"dtype='bool', fill_value={value}, name='{name}')" return gast.parse(func_code).body[0] if isinstance(value, float): - func_code += "dtype='float64', fill_value={}, name='{}')".format( - value, name - ) + func_code += f"dtype='float64', fill_value={value}, name='{name}')" return gast.parse(func_code).body[0] if isinstance(value, int): - func_code += "dtype='int64', fill_value={}, name='{}')".format( - value, name - ) + func_code += f"dtype='int64', fill_value={value}, name='{name}')" return gast.parse(func_code).body[0] diff --git a/python/paddle/metric/metrics.py b/python/paddle/metric/metrics.py index 9cac2ff006c72..2760b448a7027 100644 --- a/python/paddle/metric/metrics.py +++ b/python/paddle/metric/metrics.py @@ -120,9 +120,7 @@ def reset(self): Reset states and result """ raise NotImplementedError( - "function 'reset' not implemented in {}.".format( - self.__class__.__name__ - ) + f"function 'reset' not implemented in {self.__class__.__name__}." ) @abc.abstractmethod @@ -138,9 +136,7 @@ def update(self, *args): see :code:`Metric.compute` """ raise NotImplementedError( - "function 'update' not implemented in {}.".format( - self.__class__.__name__ - ) + f"function 'update' not implemented in {self.__class__.__name__}." ) @abc.abstractmethod @@ -149,9 +145,7 @@ def accumulate(self): Accumulates statistics, computes and returns the metric value """ raise NotImplementedError( - "function 'accumulate' not implemented in {}.".format( - self.__class__.__name__ - ) + f"function 'accumulate' not implemented in {self.__class__.__name__}." ) @abc.abstractmethod @@ -160,9 +154,7 @@ def name(self): Returns metric name """ raise NotImplementedError( - "function 'name' not implemented in {}.".format( - self.__class__.__name__ - ) + f"function 'name' not implemented in {self.__class__.__name__}." ) def compute(self, *args): diff --git a/python/paddle/nn/functional/activation.py b/python/paddle/nn/functional/activation.py index e02a47d7bf8dd..58364cc6d78a5 100644 --- a/python/paddle/nn/functional/activation.py +++ b/python/paddle/nn/functional/activation.py @@ -564,7 +564,7 @@ def prelu(x, weight, data_format="NCHW", name=None): if data_format not in true_data_format: raise ValueError( "data_format must be one of 'NC', 'NCL', 'NCHW', 'NCDHW', " - "'NLC', 'NHWC', 'NDHWC' but receive {}".format(data_format) + f"'NLC', 'NHWC', 'NDHWC' but receive {data_format}" ) data_format = 'NCHW' if data_format[1] == 'C' else 'NHWC' @@ -700,9 +700,7 @@ def rrelu(x, lower=1.0 / 8.0, upper=1.0 / 3.0, training=True, name=None): if upper > 1: raise ValueError( - "The upper value must be no greater than one. Received: {}.".format( - upper - ) + f"The upper value must be no greater than one. Received: {upper}." ) is_test = not training @@ -1344,9 +1342,7 @@ def softshrink(x, threshold=0.5, name=None): """ if threshold < 0: raise ValueError( - "The threshold must be no less than zero. 
Received: {}.".format( - threshold - ) + f"The threshold must be no less than zero. Received: {threshold}." ) if in_dynamic_mode(): diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py index 9b1da0dd36802..de606db3c39d1 100644 --- a/python/paddle/nn/functional/common.py +++ b/python/paddle/nn/functional/common.py @@ -592,8 +592,8 @@ def _is_list_or_turple_(data): elif isinstance(scale, (list, tuple)): if len(scale) != len(x.shape) - 2: raise ValueError( - "scale_shape length should be {} for " - "input {}-D tensor.".format(len(x.shape) - 2, len(x.shape)) + f"scale_shape length should be {len(x.shape) - 2} for " + f"input {len(x.shape)}-D tensor." ) for value in scale: if value <= 0: @@ -1366,9 +1366,7 @@ def dropout2d(x, p=0.5, training=True, data_format='NCHW', name=None): input_shape = x.shape if len(input_shape) != 4: raise ValueError( - "dimensions of x should be 4, but received {} != 4".format( - len(input_shape) - ) + f"dimensions of x should be 4, but received {len(input_shape)} != 4" ) if data_format not in ["NCHW", "NHWC"]: @@ -1424,9 +1422,7 @@ def dropout3d(x, p=0.5, training=True, data_format='NCDHW', name=None): input_shape = x.shape if len(input_shape) != 5: raise ValueError( - "dimensions of x should be 5, but received {} != 5".format( - len(input_shape) - ) + f"dimensions of x should be 5, but received {len(input_shape)} != 5" ) if data_format not in ["NCDHW", "NDHWC"]: @@ -1644,14 +1640,12 @@ def pad(x, pad, mode='constant', value=0.0, data_format="NCHW", name=None): 'replicate', 'constant', 'circular', - ], "mode should be one of constant, reflect, replicate, circular, but got {}.".format( - mode - ) + ], f"mode should be one of constant, reflect, replicate, circular, but got {mode}." data_format = data_format.upper() assert data_format in ["NCL", "NCHW", "NCDHW", "NLC", "NHWC", "NDHWC"], ( "data_format should be in one of [NCL, NCHW, NCDHW, NLC, NHWC, NDHWC], " - "but got {}".format(data_format) + f"but got {data_format}" ) x_dim = len(x.shape) @@ -2213,19 +2207,15 @@ class centers and the shape of sampled_class_center will be [num_positive_class_ label_size *= dim if label_size != -1 and label_size < 1: raise ValueError( - 'Expected label_size > 0 \ - (got label_size: {})'.format( - label_size - ) + f'Expected label_size > 0 \ + (got label_size: {label_size})' ) label_dims = len(list(label.shape)) if label_dims != 1: raise ValueError( - 'Expected label_dims == 1 \ - (got label_dims: {})'.format( - label_dims - ) + f'Expected label_dims == 1 \ + (got label_dims: {label_dims})' ) seed = None diff --git a/python/paddle/nn/functional/conv.py b/python/paddle/nn/functional/conv.py index 00e45f789ae5e..55c8776e9ffbd 100644 --- a/python/paddle/nn/functional/conv.py +++ b/python/paddle/nn/functional/conv.py @@ -60,9 +60,7 @@ def _update_padding_nd(padding, channel_last, num_dims): padding = padding.upper() if padding not in ["SAME", "VALID"]: raise ValueError( - "Unknown padding: '{}'. It can only be 'SAME' or 'VALID'.".format( - padding - ) + f"Unknown padding: '{padding}'. It can only be 'SAME' or 'VALID'." 
) if padding == "VALID": padding_algorithm = "VALID" @@ -77,8 +75,8 @@ def _update_padding_nd(padding, channel_last, num_dims): if len(padding) == 2 + num_dims and _is_list_or_tuple(padding[0]): if not _zero_padding_in_batch_and_channel(padding, channel_last): raise ValueError( - "Non-zero padding({}) in the batch or channel dimensions " - "is not supported.".format(padding) + f"Non-zero padding({padding}) in the batch or channel dimensions " + "is not supported." ) padding_algorithm = "EXPLICIT" padding = _exclude_padding_in_batch_and_channel( @@ -396,7 +394,7 @@ def conv1d( if data_format not in ["NCL", "NLC"]: raise ValueError( "Attr(data_format) should be 'NCL' or 'NLC'. " - "Received Attr(data_format): {}.".format(data_format) + f"Received Attr(data_format): {data_format}." ) channel_last = data_format == "NLC" @@ -404,22 +402,18 @@ def conv1d( conv2d_data_format = "NHWC" if channel_last else "NCHW" if len(x.shape) != 3: raise ValueError( - "Input x should be 3D tensor, but received x with the shape of {}".format( - x.shape - ) + f"Input x should be 3D tensor, but received x with the shape of {x.shape}" ) num_channels = x.shape[channel_dim] num_filters = weight.shape[0] if num_channels < 0: raise ValueError( - "The channel dimension of the input({}) " - "should be defined. Received: {}.".format(x.shape, num_channels) + f"The channel dimension of the input({x.shape}) " + f"should be defined. Received: {num_channels}." ) if groups <= 0: raise ValueError( - "The groups of conv1d should be greater than 0. Received groups: {}".format( - groups - ) + f"The groups of conv1d should be greater than 0. Received groups: {groups}" ) if num_channels % groups != 0: raise ValueError( @@ -647,29 +641,25 @@ def conv2d( if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'. " - "Received Attr(data_format): {}.".format(data_format) + f"Received Attr(data_format): {data_format}." ) channel_last = data_format == "NHWC" channel_dim = -1 if channel_last else 1 if len(x.shape) != 4: raise ValueError( - "Input x should be 4D tensor, but received x with the shape of {}".format( - x.shape - ) + f"Input x should be 4D tensor, but received x with the shape of {x.shape}" ) num_channels = x.shape[channel_dim] num_filters = weight.shape[0] if num_channels < 0: raise ValueError( - "The channel dimension of the input({}) " - "should be defined. Received: {}.".format(x.shape, num_channels) + f"The channel dimension of the input({x.shape}) " + f"should be defined. Received: {num_channels}." ) if groups <= 0: raise ValueError( - "The groups of conv2d should be greater than 0. Received groups: {}".format( - groups - ) + f"The groups of conv2d should be greater than 0. Received groups: {groups}" ) if num_channels % groups != 0: raise ValueError( @@ -911,24 +901,20 @@ def conv1d_transpose( if data_format not in ['NCL', 'NLC']: raise ValueError( "Attr(data_format) of conv2d_transpose got wrong value: " - "received {}, but only 'NCL' or 'NLC' are supported.".format( - data_format - ) + f"received {data_format}, but only 'NCL' or 'NLC' are supported." ) channel_last = data_format == "NLC" channel_dim = -1 if channel_last else 1 if len(x.shape) != 3: raise ValueError( - "Input x should be 3D tensor, but received x with the shape of {}".format( - x.shape - ) + f"Input x should be 3D tensor, but received x with the shape of {x.shape}" ) num_channels = x.shape[channel_dim] if num_channels < 0: raise ValueError( - "The channel dimension of the input({}) " - "should be defined. 
Received: {}.".format(x.shape, num_channels) + f"The channel dimension of the input({x.shape}) " + f"should be defined. Received: {num_channels}." ) if groups <= 0: raise ValueError( @@ -952,9 +938,7 @@ def conv1d_transpose( padding = padding + [0] else: raise ValueError( - "The size of padding's dimension should 1 or 2. But got padding={}".format( - padding - ) + f"The size of padding's dimension should 1 or 2. But got padding={padding}" ) stride = convert_to_list(stride, 1, 'stride') + [1] @@ -985,9 +969,7 @@ def conv1d_transpose( if len(output_padding) > 0 and output_padding[0] > stride[0]: raise ValueError( "The size of output_padding should not be greater than stride." - "But got output_padding={} and stride={}".format( - output_padding[0], stride[0] - ) + f"But got output_padding={output_padding[0]} and stride={stride[0]}" ) if len(weight.shape) != 3: @@ -1198,17 +1180,13 @@ def conv2d_transpose( if data_format not in ['NCHW', 'NHWC']: raise ValueError( "Attr(data_format) of conv2d_transpose got wrong value: " - "received {}, but only 'NCHW' or 'NHWC' are supported.".format( - data_format - ) + f"received {data_format}, but only 'NCHW' or 'NHWC' are supported." ) channel_last = data_format == "NHWC" channel_dim = -1 if channel_last else 1 if len(x.shape) != 4: raise ValueError( - "Input x should be 4D tensor, but received x with the shape of {}".format( - x.shape - ) + f"Input x should be 4D tensor, but received x with the shape of {x.shape}" ) if len(weight.shape) != 4: raise ValueError( @@ -1219,8 +1197,8 @@ def conv2d_transpose( num_channels = x.shape[channel_dim] if num_channels < 0: raise ValueError( - "The channel dimension of the input({}) " - "should be defined. Received: {}.".format(x.shape, num_channels) + f"The channel dimension of the input({x.shape}) " + f"should be defined. Received: {num_channels}." ) if groups <= 0: raise ValueError( @@ -1491,43 +1469,35 @@ def conv3d( if data_format not in ["NCDHW", "NDHWC"]: raise ValueError( "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received " - "Attr(data_format): {}.".format(data_format) + f"Attr(data_format): {data_format}." ) channel_last = data_format == "NDHWC" channel_dim = -1 if channel_last else 1 if len(x.shape) != 5: raise ValueError( - "Input x should be 5D tensor, but received x with the shape of {}".format( - x.shape - ) + f"Input x should be 5D tensor, but received x with the shape of {x.shape}" ) num_channels = x.shape[channel_dim] num_filters = weight.shape[0] if num_channels < 0: raise ValueError( - "The channel dimension of the input({}) should be defined. " - "Received: {}.".format(x.shape, num_channels) + f"The channel dimension of the input({x.shape}) should be defined. " + f"Received: {num_channels}." ) if groups <= 0: raise ValueError( - "The groups of conv3d should be greater than 0. Received groups: {}".format( - groups - ) + f"The groups of conv3d should be greater than 0. Received groups: {groups}" ) if num_channels % groups != 0: raise ValueError( "The number of input channels must be divisible by Attr(groups). " - "Received: number of channels({}), groups({}).".format( - num_channels, groups - ) + f"Received: number of channels({num_channels}), groups({groups})." ) if num_filters % groups != 0: raise ValueError( "The number of filters must be divisible by Attr(groups). " - "Received: number of filters({}), groups({}).".format( - num_filters, groups - ) + f"Received: number of filters({num_filters}), groups({groups})." 
) cudnn_version = get_cudnn_version() @@ -1705,16 +1675,14 @@ def conv3d_transpose( if data_format not in ["NCDHW", "NDHWC"]: raise ValueError( "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received " - "Attr(data_format): {}.".format(data_format) + f"Attr(data_format): {data_format}." ) channel_last = data_format == "NDHWC" channel_dim = -1 if channel_last else 1 if len(x.shape) != 5: raise ValueError( - "Input x should be 5D tensor, but received x with the shape of {}".format( - x.shape - ) + f"Input x should be 5D tensor, but received x with the shape of {x.shape}" ) if len(weight.shape) != 5: raise ValueError( @@ -1726,8 +1694,8 @@ def conv3d_transpose( num_filters = weight.shape[1] if num_channels < 0: raise ValueError( - "The channel dimension of the input({}) should be defined. " - "Received: {}.".format(x.shape, num_channels) + f"The channel dimension of the input({x.shape}) should be defined. " + f"Received: {num_channels}." ) if groups <= 0: raise ValueError( @@ -1738,9 +1706,7 @@ def conv3d_transpose( if num_channels % groups != 0: raise ValueError( "The number of input channels must be divisible by Attr(groups). " - "Received: number of channels({}), groups({}).".format( - num_channels, groups - ) + f"Received: number of channels({num_channels}), groups({groups})." ) padding, padding_algorithm = _update_padding_nd(padding, channel_last, 3) diff --git a/python/paddle/nn/functional/extension.py b/python/paddle/nn/functional/extension.py index 252afc268bf7c..757c9059efdd6 100644 --- a/python/paddle/nn/functional/extension.py +++ b/python/paddle/nn/functional/extension.py @@ -405,7 +405,7 @@ def temporal_shift(x, seg_num, shift_ratio=0.25, name=None, data_format="NCHW"): if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'. " - "Received Attr(data_format): {}.".format(data_format) + f"Received Attr(data_format): {data_format}." 
) if in_dynamic_mode(): return _C_ops.temporal_shift(x, seg_num, shift_ratio, data_format) diff --git a/python/paddle/nn/functional/input.py b/python/paddle/nn/functional/input.py index 0a714bce0b992..e38797a1115ae 100644 --- a/python/paddle/nn/functional/input.py +++ b/python/paddle/nn/functional/input.py @@ -219,9 +219,7 @@ def embedding(x, weight, padding_idx=None, sparse=False, name=None): if padding_idx >= weight.shape[0] or padding_idx < -weight.shape[0]: raise ValueError( - "padding_idx must be within [-{}, {})".format( - weight.shape[0], weight.shape[0] - ) + f"padding_idx must be within [-{weight.shape[0]}, {weight.shape[0]})" ) if in_dynamic_or_pir_mode(): diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py index 9bf445054cd14..e74e67d83f88e 100644 --- a/python/paddle/nn/functional/loss.py +++ b/python/paddle/nn/functional/loss.py @@ -262,10 +262,8 @@ def base_softmax_with_cross_entropy( label_dims = len(list(label.shape)) if input_dims - 1 != label_dims and input_dims != label_dims: raise ValueError( - 'Expected input_dims - 1 = label_dims or input_dims == label_dims\ - (got input_dims{}, label_dims{})'.format( - input_dims, label_dims - ) + f'Expected input_dims - 1 = label_dims or input_dims == label_dims\ + (got input_dims{input_dims}, label_dims{label_dims})' ) if input_dims - 1 == label_dims: label = paddle.unsqueeze(label, axis=axis) @@ -1424,10 +1422,8 @@ def nll_loss( if input_dims - 1 != label_dims and input_dims != label_dims: raise ValueError( - "Expected input_dims - 1 = label_dims or input_dims == label_dims\ - (got input_dims{}, label_dims{})".format( - input_dims, label_dims - ) + f"Expected input_dims - 1 = label_dims or input_dims == label_dims\ + (got input_dims{input_dims}, label_dims{label_dims})" ) if input_dims < 2: @@ -1435,9 +1431,7 @@ def nll_loss( if input_shape[1] < 1: raise ValueError( - "Expected 1 or more classess (got num classes{})".format( - input_shape[1] - ) + f"Expected 1 or more classess (got num classes{input_shape[1]})" ) n = input_shape[0] @@ -1781,7 +1775,7 @@ def mse_loss(input, label, reduction='mean', name=None): if reduction not in ['sum', 'mean', 'none']: raise ValueError( "'reduction' in 'mse_loss' should be 'sum', 'mean' or 'none', " - "but received {}.".format(reduction) + f"but received {reduction}." 
) if not in_dynamic_mode(): @@ -2309,10 +2303,8 @@ def margin_cross_entropy( label_dims = len(list(label.shape)) if input_dims - 1 != label_dims and input_dims != label_dims: raise ValueError( - 'Expected input_dims - 1 = label_dims or input_dims == label_dims\ - (got input_dims{}, label_dims{})'.format( - input_dims, label_dims - ) + f'Expected input_dims - 1 = label_dims or input_dims == label_dims\ + (got input_dims{input_dims}, label_dims{label_dims})' ) if input_dims - 1 == label_dims: label = paddle.unsqueeze(label, axis=-1) @@ -2795,10 +2787,8 @@ def cross_entropy( if input_dims - 1 != label_dims and input_dims != label_dims: raise ValueError( - 'Expected nput_dims - 1 = label_dims or input_dims == label_dims\ - (got nput_dims{}, label_dims{})'.format( - input_dims, label_dims - ) + f'Expected nput_dims - 1 = label_dims or input_dims == label_dims\ + (got nput_dims{input_dims}, label_dims{label_dims})' ) if label_smoothing > 0.0: @@ -2846,11 +2836,9 @@ def cross_entropy( else: if input.shape[axis] != weight.shape[-1]: raise ValueError( - "input's class_dimension({}) must equal to " - "weight's class_dimension({}) " - "when weight is provided".format( - input.shape[axis], weight.shape[-1] - ) + f"input's class_dimension({input.shape[axis]}) must equal to " + f"weight's class_dimension({weight.shape[-1]}) " + "when weight is provided" ) ignore_weight_mask = paddle.cast( @@ -2993,11 +2981,9 @@ def cross_entropy( else: if input.shape[axis] != weight.shape[-1]: raise ValueError( - "input's class_dimension({}) must equal to " - "weight's class_dimension({}) " - "when weight is provided".format( - input.shape[axis], weight.shape[-1] - ) + f"input's class_dimension({input.shape[axis]}) must equal to " + f"weight's class_dimension({weight.shape[-1]}) " + "when weight is provided" ) valid_label = paddle.multiply( @@ -3321,7 +3307,7 @@ def multi_label_soft_margin_loss( if not (input.shape == label.shape): raise ValueError( "The input and label should have same dimension," - "but received {}!={}".format(input.shape, label.shape) + f"but received {input.shape}!={label.shape}" ) if not in_dynamic_mode(): @@ -3442,7 +3428,7 @@ def hinge_embedding_loss(input, label, margin=1.0, reduction='mean', name=None): if reduction not in ['sum', 'mean', 'none']: raise ValueError( "'reduction' in 'hinge_embedding_loss' should be 'sum', 'mean' or 'none', " - "but received {}.".format(reduction) + f"but received {reduction}." ) if not in_dynamic_mode(): @@ -3669,7 +3655,7 @@ def triplet_margin_with_distance_loss( raise ValueError( "'reduction' in 'triplet_margin_with_distance_loss' " "should be 'sum', 'mean' or 'none', " - "but received {}.".format(reduction) + f"but received {reduction}." ) if margin < 0: raise ValueError( @@ -3819,7 +3805,7 @@ def triplet_margin_loss( if reduction not in ['sum', 'mean', 'none']: raise ValueError( "'reduction' in 'triplet_margin_loss' should be 'sum', 'mean' or 'none', " - "but received {}.".format(reduction) + f"but received {reduction}." ) if margin < 0: raise ValueError( @@ -3934,7 +3920,7 @@ def multi_margin_loss( if reduction not in ['sum', 'mean', 'none']: raise ValueError( "'reduction' in 'multi_margin_loss' should be 'sum', 'mean' or 'none', " - "but received {}.".format(reduction) + f"but received {reduction}." 
) if not in_dynamic_mode(): diff --git a/python/paddle/nn/functional/norm.py b/python/paddle/nn/functional/norm.py index a0184444e4611..704eb880c516c 100644 --- a/python/paddle/nn/functional/norm.py +++ b/python/paddle/nn/functional/norm.py @@ -183,7 +183,7 @@ def batch_norm( if data_format not in true_data_format: raise ValueError( "data_format must be one of 'NC', 'NCL', 'NCHW', 'NCDHW', " - "'NLC', 'NHWC', 'NDHWC' but receive {}".format(data_format) + f"'NLC', 'NHWC', 'NDHWC' but receive {data_format}" ) data_format = 'NCHW' if data_format[1] == 'C' else 'NHWC' @@ -545,23 +545,21 @@ def local_response_norm( if data_format not in ['NCL', 'NLC', 'NCHW', 'NHWC', 'NCDHW', 'NDHWC']: raise ValueError( "data_format should be in one of [NCL, NCHW, NCDHW, NLC, NHWC, NDHWC], " - "but got {}".format(data_format) + f"but got {data_format}" ) sizes = x.shape dim = len(sizes) if dim < 3: raise ValueError( - 'Expected 3D or higher dimensionality input, but got {} dimensions'.format( - dim - ) + f'Expected 3D or higher dimensionality input, but got {dim} dimensions' ) for i, sz in enumerate(sizes): if not sz > 0 and i > 0: raise ValueError( "Expected every dim's size to be larger than 0, " - "but the size of the {}-th dim is {}".format(i, sz) + f"but the size of the {i}-th dim is {sz}" ) channel_last = True if data_format[-1] == "C" else False diff --git a/python/paddle/nn/functional/pooling.py b/python/paddle/nn/functional/pooling.py index 2642996ddee77..5e5bf191dfc38 100755 --- a/python/paddle/nn/functional/pooling.py +++ b/python/paddle/nn/functional/pooling.py @@ -48,9 +48,7 @@ def _check_input(x, dimension): def _check_instance(x, x_name, types=(int, float)): if not isinstance(x, types): raise ValueError( - "Excepted {} type for {} but received type: {}. ".format( - types, x_name, type(x) - ) + f"Excepted {types} type for {x_name} but received type: {type(x)}. " ) @@ -112,9 +110,7 @@ def _update_padding_nd(padding, num_dims, channel_last=False, ceil_mode=False): padding = padding.upper() if padding not in ["SAME", "VALID"]: raise ValueError( - "Unknown padding: '{}'. It can only be 'SAME' or 'VALID'.".format( - padding - ) + f"Unknown padding: '{padding}'. It can only be 'SAME' or 'VALID'." ) if padding == "VALID": if ceil_mode is not False: @@ -135,8 +131,8 @@ def _update_padding_nd(padding, num_dims, channel_last=False, ceil_mode=False): if len(padding) == 2 + num_dims and _is_list_or_tuple(padding[0]): if not _zero_padding_in_batch_and_channel(padding, channel_last): raise ValueError( - "Non-zero padding({}) in the batch or channel dimensions " - "is not supported.".format(padding) + f"Non-zero padding({padding}) in the batch or channel dimensions " + "is not supported." 
) padding_algorithm = "EXPLICIT" padding = _exclude_padding_in_batch_and_channel( diff --git a/python/paddle/nn/functional/vision.py b/python/paddle/nn/functional/vision.py index d49ca156f77ea..d41ccc975f191 100644 --- a/python/paddle/nn/functional/vision.py +++ b/python/paddle/nn/functional/vision.py @@ -267,9 +267,7 @@ def grid_sample( _padding_modes = ['zeros', 'reflection', 'border'] if mode not in _modes: raise ValueError( - "The mode of grid sample function should be in {}, but got: {}".format( - _modes, mode - ) + f"The mode of grid sample function should be in {_modes}, but got: {mode}" ) if padding_mode not in _padding_modes: raise ValueError( @@ -280,9 +278,7 @@ def grid_sample( if not isinstance(align_corners, bool): raise ValueError( - "The align corners should be bool, but got: {}".format( - align_corners - ) + f"The align corners should be bool, but got: {align_corners}" ) cudnn_version = get_cudnn_version() @@ -371,7 +367,7 @@ def pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None): if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'." - "But recevie Attr(data_format): {} ".format(data_format) + f"But recevie Attr(data_format): {data_format} " ) if in_dygraph_mode(): return _C_ops.pixel_shuffle(x, upscale_factor, data_format) @@ -419,9 +415,7 @@ def pixel_unshuffle(x, downscale_factor, data_format="NCHW", name=None): """ if len(x.shape) != 4: raise ValueError( - "Input x should be 4D tensor, but received x with the shape of {}".format( - x.shape - ) + f"Input x should be 4D tensor, but received x with the shape of {x.shape}" ) if not isinstance(downscale_factor, int): @@ -433,7 +427,7 @@ def pixel_unshuffle(x, downscale_factor, data_format="NCHW", name=None): if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'." - "But recevie Attr(data_format): {} ".format(data_format) + f"But recevie Attr(data_format): {data_format} " ) if in_dygraph_mode(): @@ -499,9 +493,7 @@ def channel_shuffle(x, groups, data_format="NCHW", name=None): """ if len(x.shape) != 4: raise ValueError( - "Input x should be 4D tensor, but received x with the shape of {}".format( - x.shape - ) + f"Input x should be 4D tensor, but received x with the shape of {x.shape}" ) if not isinstance(groups, int): @@ -513,7 +505,7 @@ def channel_shuffle(x, groups, data_format="NCHW", name=None): if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'." - "But recevie Attr(data_format): {} ".format(data_format) + f"But recevie Attr(data_format): {data_format} " ) if in_dygraph_mode(): diff --git a/python/paddle/nn/initializer/initializer.py b/python/paddle/nn/initializer/initializer.py index c77d6ae466637..6f37e95a79816 100644 --- a/python/paddle/nn/initializer/initializer.py +++ b/python/paddle/nn/initializer/initializer.py @@ -160,7 +160,5 @@ def calculate_gain(nonlinearity, param=None): return recommended_gain[nonlinearity] else: raise ValueError( - "nonlinearity function {} is not suppported now.".format( - nonlinearity - ) + f"nonlinearity function {nonlinearity} is not suppported now." 
) diff --git a/python/paddle/nn/layer/activation.py b/python/paddle/nn/layer/activation.py index 4bcb19ea95c74..d8a4d0e6fedd0 100644 --- a/python/paddle/nn/layer/activation.py +++ b/python/paddle/nn/layer/activation.py @@ -693,9 +693,7 @@ def forward(self, x): def extra_repr(self): name_str = f', name={self._name}' if self._name else '' - return 'scale={:.16f}, alpha={:.16f}{}'.format( - self._scale, self._alpha, name_str - ) + return f'scale={self._scale:.16f}, alpha={self._alpha:.16f}{name_str}' class LeakyReLU(Layer): @@ -890,9 +888,7 @@ def forward(self, x): def extra_repr(self): name_str = f', name={self._name}' if self._name else '' - return 'beta={}, threshold={}{}'.format( - self._beta, self._threshold, name_str - ) + return f'beta={self._beta}, threshold={self._threshold}{name_str}' class Softshrink(Layer): @@ -1546,9 +1542,7 @@ def __init__(self, name=None): def forward(self, x): assert ( x.ndim == 3 or x.ndim == 4 - ), "Softmax2D requires a 3D or 4D tensor as input. Received: {}D.".format( - x.ndim - ) + ), f"Softmax2D requires a 3D or 4D tensor as input. Received: {x.ndim}D." return F.softmax(x, axis=-3, dtype=self._dtype, name=self._name) def extra_repr(self): diff --git a/python/paddle/nn/layer/common.py b/python/paddle/nn/layer/common.py index 0c55895d21253..f30ff14e502f1 100644 --- a/python/paddle/nn/layer/common.py +++ b/python/paddle/nn/layer/common.py @@ -517,9 +517,7 @@ def extra_repr(self): else: main_str = f'size={self.size}' name_str = f', name={self.name}' if self.name else '' - return '{}, data_format={}{}'.format( - main_str, self.data_format, name_str - ) + return f'{main_str}, data_format={self.data_format}{name_str}' class UpsamplingBilinear2D(Layer): @@ -606,9 +604,7 @@ def extra_repr(self): else: main_str = f'size={self.size}' name_str = f', name={self.name}' if self.name else '' - return '{}, data_format={}{}'.format( - main_str, self.data_format, name_str - ) + return f'{main_str}, data_format={self.data_format}{name_str}' class Bilinear(Layer): @@ -798,9 +794,7 @@ def forward(self, input): def extra_repr(self): name_str = f', name={self.name}' if self.name else '' - return 'p={}, axis={}, mode={}{}'.format( - self.p, self.axis, self.mode, name_str - ) + return f'p={self.p}, axis={self.axis}, mode={self.mode}{name_str}' class Dropout2D(Layer): @@ -876,9 +870,7 @@ def forward(self, input): def extra_repr(self): name_str = f', name={self.name}' if self.name else '' - return 'p={}, data_format={}{}'.format( - self.p, self.data_format, name_str - ) + return f'p={self.p}, data_format={self.data_format}{name_str}' class Dropout3D(Layer): @@ -956,9 +948,7 @@ def forward(self, input): def extra_repr(self): name_str = f', name={self.name}' if self.name else '' - return 'p={}, data_format={}{}'.format( - self.p, self.data_format, name_str - ) + return f'p={self.p}, data_format={self.data_format}{name_str}' class AlphaDropout(Layer): @@ -1224,9 +1214,7 @@ def forward(self, x): def extra_repr(self): name_str = f', name={self._name}' if self._name else '' - return 'padding={}, data_format={}{}'.format( - self._pad, self._data_format, name_str - ) + return f'padding={self._pad}, data_format={self._data_format}{name_str}' class Pad3D(Layer): @@ -1496,9 +1484,7 @@ def __init__( if padding_idx >= num_embeddings or padding_idx < -num_embeddings: raise ValueError( - "padding_idx must be within [-{}, {})".format( - num_embeddings, num_embeddings - ) + f"padding_idx must be within [-{num_embeddings}, {num_embeddings})" ) self._dtype = self._helper.get_default_dtype() diff --git 
a/python/paddle/nn/layer/container.py b/python/paddle/nn/layer/container.py index 09fefb227eceb..1f2986a6395d5 100644 --- a/python/paddle/nn/layer/container.py +++ b/python/paddle/nn/layer/container.py @@ -505,9 +505,7 @@ def insert(self, index, sublayer): """ assert isinstance(index, int) and -len(self._sub_layers) <= index < len( self._sub_layers - ), "index should be an integer in range [{}, {})".format( - -len(self), len(self) - ) + ), f"index should be an integer in range [{-len(self)}, {len(self)})" index = self._get_abs_idx(index) for i in range(len(self._sub_layers), index, -1): diff --git a/python/paddle/nn/layer/layers.py b/python/paddle/nn/layer/layers.py index 623d8d0ed2140..9f4d7d037cf7f 100644 --- a/python/paddle/nn/layer/layers.py +++ b/python/paddle/nn/layer/layers.py @@ -1517,9 +1517,7 @@ def is_already_registered(is_pre_hook): if not isinstance(attrs, dict): raise TypeError( - "attrs should be type(dict), but received {}".format( - type(attrs).__name__ - ) + f"attrs should be type(dict), but received {type(attrs).__name__}" ) # NOTE: Overwrite behavior for same key. @@ -1587,9 +1585,7 @@ def _remove_if_exist(*dicts): if len(self._loaddict_holder) > 0: assert ( value.name in self._loaddict_holder - ), "Parameter not found, Can't not find [ {} ] in state_dict".format( - value.name - ) + ), f"Parameter not found, Can't not find [ {value.name} ] in state_dict" value.set_value(self._loaddict_holder[value.name]) @@ -1964,10 +1960,8 @@ def _check_match(key, param): if len(state) != len(param): missing_keys.append(key) raise ValueError( - "{} receieves the length of {}, " - "but the expected shape is {}".format( - key, len(state), len(param) - ) + f"{key} receieves the length of {len(state)}, " + f"but the expected shape is {len(param)}" ) else: match_keys.add(key) diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py index 77b8dbdaaaaad..f8382ab13fe0e 100644 --- a/python/paddle/nn/layer/loss.py +++ b/python/paddle/nn/layer/loss.py @@ -617,7 +617,7 @@ def __init__(self, reduction='mean'): if reduction not in ['sum', 'mean', 'none']: raise ValueError( "'reduction' in 'MSELoss' should be 'sum', 'mean' or 'none', " - "but received {}.".format(reduction) + f"but received {reduction}." ) self.reduction = reduction @@ -2009,7 +2009,7 @@ def __init__( if reduction not in ['sum', 'mean', 'none']: raise ValueError( "'reduction' in 'MultiMarginLoss' should be 'sum', 'mean' or 'none', " - "but received {}.".format(reduction) + f"but received {reduction}." 
) self.p = p self.margin = margin diff --git a/python/paddle/nn/layer/norm.py b/python/paddle/nn/layer/norm.py index 53e71bb26a3b3..9944a4b481126 100644 --- a/python/paddle/nn/layer/norm.py +++ b/python/paddle/nn/layer/norm.py @@ -109,9 +109,7 @@ def forward(self, input): ) def extra_repr(self): - return 'num_features={}, epsilon={}'.format( - self._num_features, self._epsilon - ) + return f'num_features={self._num_features}, epsilon={self._epsilon}' class InstanceNorm1D(_InstanceNormBase): @@ -202,9 +200,7 @@ def __init__( def _check_input_dim(self, input): if len(input.shape) != 2 and len(input.shape) != 3: raise ValueError( - 'expected 2D or 3D input (got {}D input)'.format( - len(input.shape) - ) + f'expected 2D or 3D input (got {len(input.shape)}D input)' ) @@ -692,9 +688,7 @@ def forward(self, input): ) def extra_repr(self): - return 'normalized_shape={}, epsilon={}'.format( - self._normalized_shape, self._epsilon - ) + return f'normalized_shape={self._normalized_shape}, epsilon={self._epsilon}' class _BatchNormBase(Layer): @@ -1279,9 +1273,7 @@ def _check_data_format(self, input): def _check_input_dim(self, input): if len(input.shape) != 2 and len(input.shape) != 3: raise ValueError( - 'expected 2D or 3D input (got {}D input)'.format( - len(input.shape) - ) + f'expected 2D or 3D input (got {len(input.shape)}D input)' ) @@ -1833,9 +1825,7 @@ def forward(self, input): return out def extra_repr(self): - main_str = 'size={}, alpha={}, beta={}, k={}'.format( - self.size, self.alpha, self.beta, self.k - ) + main_str = f'size={self.size}, alpha={self.alpha}, beta={self.beta}, k={self.k}' if self.data_format != 'NCHW': main_str += f', data_format={self.data_format}' if self.name is not None: @@ -1922,7 +1912,7 @@ def __init__( assert dim < len(self._weight_shape), ( "The input `dim` should be less than the " "length of `weight_shape`, but received dim=" - "{}".format(dim) + f"{dim}" ) h = self._weight_shape[self._dim] w = np.prod(self._weight_shape) // h diff --git a/python/paddle/nn/layer/pooling.py b/python/paddle/nn/layer/pooling.py index 5a8725771002c..3108aeebeded4 100755 --- a/python/paddle/nn/layer/pooling.py +++ b/python/paddle/nn/layer/pooling.py @@ -955,9 +955,7 @@ def forward(self, input): ) def extra_repr(self): - return 'output_size={}, return_mask={}'.format( - self.output_size, self.return_mask - ) + return f'output_size={self.output_size}, return_mask={self.return_mask}' class AdaptiveMaxPool2D(Layer): @@ -1041,8 +1039,8 @@ def forward(self, x): ) def extra_repr(self): - return 'output_size={}, return_mask={}'.format( - self._output_size, self._return_mask + return ( + f'output_size={self._output_size}, return_mask={self._return_mask}' ) @@ -1138,8 +1136,8 @@ def forward(self, x): ) def extra_repr(self): - return 'output_size={}, return_mask={}'.format( - self._output_size, self._return_mask + return ( + f'output_size={self._output_size}, return_mask={self._return_mask}' ) diff --git a/python/paddle/nn/layer/rnn.py b/python/paddle/nn/layer/rnn.py index 1cafb1cf1b614..aa7f6d91edbfa 100644 --- a/python/paddle/nn/layer/rnn.py +++ b/python/paddle/nn/layer/rnn.py @@ -805,7 +805,7 @@ def __init__( if activation not in ["tanh", "relu"]: raise ValueError( "activation for SimpleRNNCell should be tanh or relu, " - "but get {}".format(activation) + f"but get {activation}" ) self.activation = activation self._activation_fn = paddle.tanh if activation == "tanh" else F.relu @@ -1285,10 +1285,8 @@ def __init__(self, cell_fw, cell_bw, time_major=False): self.cell_bw = cell_bw if 
cell_fw.input_size != cell_bw.input_size: raise ValueError( - "input size of forward cell({}) does not equals" - "that of backward cell({})".format( - cell_fw.input_size, cell_bw.input_size - ) + f"input size of forward cell({cell_fw.input_size}) does not equals" + f"that of backward cell({cell_bw.input_size})" ) for cell in [self.cell_fw, self.cell_bw]: if not hasattr(cell, "call"): @@ -1380,7 +1378,7 @@ def __init__( else: raise ValueError( "direction should be forward or bidirect (or bidirectional), " - "received direction = {}".format(direction) + f"received direction = {direction}" ) self.could_use_cudnn = True diff --git a/python/paddle/nn/layer/transformer.py b/python/paddle/nn/layer/transformer.py index 5cd1902fce318..0c70922ce6a67 100644 --- a/python/paddle/nn/layer/transformer.py +++ b/python/paddle/nn/layer/transformer.py @@ -171,11 +171,11 @@ def __init__( assert embed_dim > 0, ( "Expected embed_dim to be greater than 0, " - "but received {}".format(embed_dim) + f"but received {embed_dim}" ) assert num_heads > 0, ( "Expected num_heads to be greater than 0, " - "but received {}".format(num_heads) + f"but received {num_heads}" ) self.embed_dim = embed_dim @@ -524,19 +524,15 @@ def __init__( super().__init__() - assert ( - d_model > 0 - ), "Expected d_model to be greater than 0, " "but received {}".format( - d_model + assert d_model > 0, ( + "Expected d_model to be greater than 0, " f"but received {d_model}" ) - assert ( - nhead > 0 - ), "Expected nhead to be greater than 0, " "but received {}".format( - nhead + assert nhead > 0, ( + "Expected nhead to be greater than 0, " f"but received {nhead}" ) assert dim_feedforward > 0, ( "Expected dim_feedforward to be greater than 0, " - "but received {}".format(dim_feedforward) + f"but received {dim_feedforward}" ) attn_dropout = dropout if attn_dropout is None else attn_dropout @@ -854,19 +850,15 @@ def __init__( super().__init__() - assert ( - d_model > 0 - ), "Expected d_model to be greater than 0, " "but received {}".format( - d_model + assert d_model > 0, ( + "Expected d_model to be greater than 0, " f"but received {d_model}" ) - assert ( - nhead > 0 - ), "Expected nhead to be greater than 0, " "but received {}".format( - nhead + assert nhead > 0, ( + "Expected nhead to be greater than 0, " f"but received {nhead}" ) assert dim_feedforward > 0, ( "Expected dim_feedforward to be greater than 0, " - "but received {}".format(dim_feedforward) + f"but received {dim_feedforward}" ) attn_dropout = dropout if attn_dropout is None else attn_dropout @@ -1294,19 +1286,15 @@ def __init__( ): super().__init__() - assert ( - d_model > 0 - ), "Expected d_model to be greater than 0, " "but received {}".format( - d_model + assert d_model > 0, ( + "Expected d_model to be greater than 0, " f"but received {d_model}" ) - assert ( - nhead > 0 - ), "Expected nhead to be greater than 0, " "but received {}".format( - nhead + assert nhead > 0, ( + "Expected nhead to be greater than 0, " f"but received {nhead}" ) assert dim_feedforward > 0, ( "Expected dim_feedforward to be greater than 0, " - "but received {}".format(dim_feedforward) + f"but received {dim_feedforward}" ) if isinstance(bias_attr, (list, tuple)): diff --git a/python/paddle/nn/layer/vision.py b/python/paddle/nn/layer/vision.py index 996699e513c37..faa946dfab57b 100644 --- a/python/paddle/nn/layer/vision.py +++ b/python/paddle/nn/layer/vision.py @@ -66,7 +66,7 @@ def __init__(self, upscale_factor, data_format="NCHW", name=None): if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Data 
format should be 'NCHW' or 'NHWC'." - "But recevie data format: {}".format(data_format) + f"But recevie data format: {data_format}" ) self._upscale_factor = upscale_factor @@ -132,7 +132,7 @@ def __init__(self, downscale_factor, data_format="NCHW", name=None): if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Data format should be 'NCHW' or 'NHWC'." - "But recevie data format: {}".format(data_format) + f"But recevie data format: {data_format}" ) self._downscale_factor = downscale_factor @@ -211,7 +211,7 @@ def __init__(self, groups, data_format="NCHW", name=None): if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Data format should be 'NCHW' or 'NHWC'." - "But recevie data format: {}".format(data_format) + f"But recevie data format: {data_format}" ) self._groups = groups diff --git a/python/paddle/nn/utils/spectral_norm_hook.py b/python/paddle/nn/utils/spectral_norm_hook.py index 050f2a533f98d..4c39154c6ab0f 100644 --- a/python/paddle/nn/utils/spectral_norm_hook.py +++ b/python/paddle/nn/utils/spectral_norm_hook.py @@ -34,7 +34,7 @@ def __init__(self, name='weight', n_power_iterations=1, dim=0, eps=1e-12): if n_power_iterations <= 0: raise ValueError( 'Expected n_power_iterations to be positive, but ' - 'got n_power_iterations={}'.format(n_power_iterations) + f'got n_power_iterations={n_power_iterations}' ) self.n_power_iterations = n_power_iterations self.eps = eps @@ -104,7 +104,7 @@ def apply(layer, name, n_power_iterations, dim, eps): if isinstance(hook, SpectralNorm) and hook.name == name: raise RuntimeError( "Cannot register two spectral_norm hooks on " - "the same parameter {}".format(name) + f"the same parameter {name}" ) fn = SpectralNorm(name, n_power_iterations, dim, eps) diff --git a/python/paddle/nn/utils/weight_norm_hook.py b/python/paddle/nn/utils/weight_norm_hook.py index 4ef5fdf2deefd..acbb6a57289d9 100644 --- a/python/paddle/nn/utils/weight_norm_hook.py +++ b/python/paddle/nn/utils/weight_norm_hook.py @@ -113,7 +113,7 @@ def apply(layer, name, dim): if isinstance(hook, WeightNorm) and hook.name == name: raise RuntimeError( "Cannot register two weight_norm hooks on " - "the same parameter {}".format(name) + f"the same parameter {name}" ) if dim is None: diff --git a/python/paddle/onnx/export.py b/python/paddle/onnx/export.py index c108e56ebee3f..433d6147847ea 100644 --- a/python/paddle/onnx/export.py +++ b/python/paddle/onnx/export.py @@ -95,7 +95,7 @@ def export(layer, path, input_spec=None, opset_version=9, **configs): raise ValueError( "The input path MUST be format of dirname/file_prefix " "[dirname\\file_prefix in Windows system], but " - "the file_prefix is empty in received path: {}".format(path) + f"the file_prefix is empty in received path: {path}" ) save_file = path + '.onnx' @@ -104,5 +104,5 @@ def export(layer, path, input_spec=None, opset_version=9, **configs): save_file, input_spec=input_spec, opset_version=opset_version, - **configs + **configs, ) diff --git a/python/paddle/optimizer/lr.py b/python/paddle/optimizer/lr.py index 113b95eec2598..6fb777447f8a1 100644 --- a/python/paddle/optimizer/lr.py +++ b/python/paddle/optimizer/lr.py @@ -1889,9 +1889,7 @@ def __init__( # Check type and value of total_steps if not isinstance(total_steps, int): raise TypeError( - "'total_step' must be 'int', but received {}".format( - type(total_steps) - ) + f"'total_step' must be 'int', but received {type(total_steps)}" ) if total_steps <= 0: raise ValueError("'total_step' must be a positive integer.") @@ -1900,15 +1898,11 @@ def __init__( # Check type and 
value of pac_start if not isinstance(phase_pct, float): raise TypeError( - "'phase_pct' must be 'float', but received {}".format( - type(phase_pct) - ) + f"'phase_pct' must be 'float', but received {type(phase_pct)}" ) if phase_pct < 0 or phase_pct > 1: raise ValueError( - "'phase_pct' must be between 0 and 1, but received {}".format( - phase_pct - ) + f"'phase_pct' must be between 0 and 1, but received {phase_pct}" ) # Check type and value of divide_factor @@ -2163,9 +2157,7 @@ def __init__( # check type of exp_gamma if not isinstance(exp_gamma, float): raise TypeError( - "The type of 'exp_gamma' must be float, but received {}".format( - type(exp_gamma) - ) + f"The type of 'exp_gamma' must be float, but received {type(exp_gamma)}" ) step_size_up = float(step_size_up) diff --git a/python/paddle/optimizer/optimizer.py b/python/paddle/optimizer/optimizer.py index 45a1069750bd9..cc758a4d2159d 100644 --- a/python/paddle/optimizer/optimizer.py +++ b/python/paddle/optimizer/optimizer.py @@ -396,9 +396,7 @@ def set_state_dict(self, state_dict): load_para_np = load_para else: raise RuntimeError( - "State dict type {} not supprt".format( - str(type(load_para)) - ) + f"State dict type {str(type(load_para))} not supprt" ) assert ( @@ -844,9 +842,7 @@ def _add_accumulator( if framework.in_dygraph_mode(): return self._accumulators[name][param.name] raise Exception( - "Accumulator {} already exists for parameter {}".format( - name, param.name - ) + f"Accumulator {name} already exists for parameter {param.name}" ) if shape is None: shape = param.shape @@ -892,9 +888,7 @@ def _add_accumulator( if len(self._accumulators_holder) > 0: assert ( var_name in self._accumulators_holder - ), "Optimizer set error, {} should in state dict".format( - var_name - ) + ), f"Optimizer set error, {var_name} should in state dict" var.set_value(self._accumulators_holder.pop(var_name)) self._accumulators[name][param.name] = var @@ -917,9 +911,7 @@ def _get_accumulator(self, name, param): or param.name not in self._accumulators[name] ): raise Exception( - "Accumulator {} does not exist for parameter {}".format( - name, param.name - ) + f"Accumulator {name} does not exist for parameter {param.name}" ) return self._accumulators[name][param.name] @@ -945,9 +937,7 @@ def _get_accumulator_master(self, name, param): or target_name not in self._accumulators[name] ): raise Exception( - "Accumulator {} does not exist for parameter {}".format( - name, target_name - ) + f"Accumulator {name} does not exist for parameter {target_name}" ) return self._accumulators[name][target_name] diff --git a/python/paddle/profiler/profiler.py b/python/paddle/profiler/profiler.py index 63fcaeffebcd4..e6b4f2b9347c3 100644 --- a/python/paddle/profiler/profiler.py +++ b/python/paddle/profiler/profiler.py @@ -249,17 +249,13 @@ def export_chrome_tracing( os.makedirs(dir_name, exist_ok=True) except Exception: raise RuntimeError( - "Can not create directory '{}' for saving profiling results.".format( - dir_name - ) + f"Can not create directory '{dir_name}' for saving profiling results." 
) def handle_fn(prof): nonlocal worker_name if not worker_name: - worker_name = "host_{}pid_{}".format( - socket.gethostname(), str(os.getpid()) - ) + worker_name = f"host_{socket.gethostname()}pid_{str(os.getpid())}" now = datetime.datetime.now() filename = '{}_time_{}.paddle_trace.json'.format( worker_name, now.strftime('%Y_%m_%d_%H_%M_%S_%f') @@ -307,17 +303,13 @@ def export_protobuf( os.makedirs(dir_name, exist_ok=True) except Exception: raise RuntimeError( - "Can not create directory '{}' for saving profiling results.".format( - dir_name - ) + f"Can not create directory '{dir_name}' for saving profiling results." ) def handle_fn(prof): nonlocal worker_name if not worker_name: - worker_name = "host_{}pid_{}".format( - socket.gethostname(), str(os.getpid()) - ) + worker_name = f"host_{socket.gethostname()}pid_{str(os.getpid())}" now = datetime.datetime.now() filename = '{}_time_{}.paddle_trace.pb'.format( worker_name, now.strftime('%Y_%m_%d_%H_%M_%S_%f') @@ -501,9 +493,7 @@ def __init__( if target not in supported_targets: self.targets.remove(target) warn( - "Profiling {} is not supported in current context.".format( - target - ) + f"Profiling {target} is not supported in current context." ) else: self.targets = supported_targets diff --git a/python/paddle/profiler/profiler_statistic.py b/python/paddle/profiler/profiler_statistic.py index 23c38d804f1a1..cbedd7d30a627 100755 --- a/python/paddle/profiler/profiler_statistic.py +++ b/python/paddle/profiler/profiler_statistic.py @@ -276,9 +276,7 @@ def print_layer_tree(node, depth=0): flops_n = _format_large_number(node.flops) flops_s = _format_large_number(node.flops * 1e9 / node.cpu_time) ret.append( - "{}{} latency: {}, FLOPs: {}, FLOPS: {}\n".format( - align, name, tm, flops_n, flops_s - ) + f"{align}{name} latency: {tm}, FLOPs: {flops_n}, FLOPS: {flops_s}\n" ) for n in node[1:]: diff --git a/python/paddle/profiler/timer.py b/python/paddle/profiler/timer.py index 3fd4eeafde36f..eace45478d38c 100644 --- a/python/paddle/profiler/timer.py +++ b/python/paddle/profiler/timer.py @@ -388,9 +388,8 @@ def step_info(self, unit): message += ' {}: {:.5f} s'.format('batch_cost', batch_average) speed_average = self.current_event.speed_average() if speed_average: - message += ' ips: {:.3f} {}'.format( - speed_average, - self.current_event.speed_unit, + message += ( + f' ips: {speed_average:.3f} {self.current_event.speed_unit}' ) self.current_event.reset() return message diff --git a/python/paddle/signal.py b/python/paddle/signal.py index a6aa6f112d3dd..8e64bc2e3400a 100644 --- a/python/paddle/signal.py +++ b/python/paddle/signal.py @@ -374,9 +374,7 @@ def stft( assert pad_mode in [ 'constant', 'reflect', - ], 'pad_mode should be "reflect" or "constant", but got "{}".'.format( - pad_mode - ) + ], f'pad_mode should be "reflect" or "constant", but got "{pad_mode}".' pad_length = n_fft // 2 # FIXME: Input `x` can be a complex tensor but pad does not support complex input. @@ -517,9 +515,7 @@ def istft( assert x_rank in [ 2, 3, - ], 'x should be a 2D or 3D complex tensor, but got rank of x is {}'.format( - x_rank - ) + ], f'x should be a 2D or 3D complex tensor, but got rank of x is {x_rank}' if x_rank == 2: # (batch, n_fft, n_frames) x = x.unsqueeze(0) @@ -533,15 +529,11 @@ def istft( # Assure no gaps between frames. assert ( 0 < hop_length <= win_length - ), 'hop_length should be in (0, win_length({})], but got {}.'.format( - win_length, hop_length - ) + ), f'hop_length should be in (0, win_length({win_length})], but got {hop_length}.' 
assert ( 0 < win_length <= n_fft - ), 'win_length should be in (0, n_fft({})], but got {}.'.format( - n_fft, win_length - ) + ), f'win_length should be in (0, n_fft({n_fft})], but got {win_length}.' n_frames = x.shape[-1] fft_size = x.shape[-2] diff --git a/python/paddle/sparse/creation.py b/python/paddle/sparse/creation.py index 9d567496c5da5..f5a7fe306723f 100644 --- a/python/paddle/sparse/creation.py +++ b/python/paddle/sparse/creation.py @@ -155,9 +155,7 @@ def sparse_coo_tensor( shape = list(shape) if shape < min_shape: raise ValueError( - "the minimun shape required is {}, but get {}".format( - min_shape, shape - ) + f"the minimun shape required is {min_shape}, but get {shape}" ) if len(shape) != sparse_dim + dense_dim: raise ValueError( @@ -247,9 +245,7 @@ def sparse_csr_tensor( if len(shape) != 2 and len(shape) != 3: raise ValueError( - "SparseCsrTensor only support 2-D or 3-D matrix. but get shape {}".format( - shape - ) + f"SparseCsrTensor only support 2-D or 3-D matrix. but get shape {shape}" ) rows = shape[len(shape) - 2] diff --git a/python/paddle/sparse/nn/functional/conv.py b/python/paddle/sparse/nn/functional/conv.py index 91fe020eeea47..ccbe8ca8f003e 100644 --- a/python/paddle/sparse/nn/functional/conv.py +++ b/python/paddle/sparse/nn/functional/conv.py @@ -43,28 +43,24 @@ def _conv3d( if data_format not in ["NDHWC"]: raise ValueError( "Attr(data_format) should be 'NDHWC'. Received " - "Attr(data_format): {}.".format(data_format) + f"Attr(data_format): {data_format}." ) if len(x.shape) != 5: raise ValueError( - "Input x should be 5D tensor, but received x with the shape of {}".format( - x.shape - ) + f"Input x should be 5D tensor, but received x with the shape of {x.shape}" ) channel_last = data_format == "NDHWC" channel_dim = -1 if channel_last else 1 if len(x.shape) != 5: raise ValueError( - "Input x should be 5D tensor, but received x with the shape of {}".format( - x.shape - ) + f"Input x should be 5D tensor, but received x with the shape of {x.shape}" ) num_channels = x.shape[channel_dim] if num_channels < 0: raise ValueError( - "The channel dimension of the input({}) should be defined. " - "Received: {}.".format(x.shape, num_channels) + f"The channel dimension of the input({x.shape}) should be defined. " + f"Received: {num_channels}." ) padding, padding_algorithm = _update_padding_nd(padding, channel_last, dims) @@ -136,28 +132,24 @@ def _conv2d( if data_format not in ["NHWC"]: raise ValueError( "Attr(data_format) should be 'NHWC'. Received " - "Attr(data_format): {}.".format(data_format) + f"Attr(data_format): {data_format}." ) if len(x.shape) != 4: raise ValueError( - "Input x should be 4D tensor, but received x with the shape of {}".format( - x.shape - ) + f"Input x should be 4D tensor, but received x with the shape of {x.shape}" ) channel_last = data_format == "NHWC" channel_dim = -1 if channel_last else 1 if len(x.shape) != 4: raise ValueError( - "Input x should be 4D tensor, but received x with the shape of {}".format( - x.shape - ) + f"Input x should be 4D tensor, but received x with the shape of {x.shape}" ) num_channels = x.shape[channel_dim] if num_channels < 0: raise ValueError( - "The channel dimension of the input({}) should be defined. " - "Received: {}.".format(x.shape, num_channels) + f"The channel dimension of the input({x.shape}) should be defined. " + f"Received: {num_channels}." 
) padding, padding_algorithm = _update_padding_nd(padding, channel_last, dims) diff --git a/python/paddle/sparse/unary.py b/python/paddle/sparse/unary.py index c49a7b03d26b4..9f9bafc135b63 100644 --- a/python/paddle/sparse/unary.py +++ b/python/paddle/sparse/unary.py @@ -1201,8 +1201,8 @@ def svd_lowrank(x, q=6, niter=2, M=None): q = min(6, m, n) elif not (q >= 0 and q <= min(m, n)): raise ValueError( - 'q(={}) must be non-negative integer' - ' and not greater than min(m, n)={}'.format(q, min(m, n)) + f'q(={q}) must be non-negative integer' + f' and not greater than min(m, n)={min(m, n)}' ) if not (niter >= 0): raise ValueError(f'niter(={niter}) must be non-negative integer') diff --git a/python/paddle/static/amp/bf16/amp_utils.py b/python/paddle/static/amp/bf16/amp_utils.py index 071328435e939..bb8d62d85b8cd 100644 --- a/python/paddle/static/amp/bf16/amp_utils.py +++ b/python/paddle/static/amp/bf16/amp_utils.py @@ -341,16 +341,12 @@ def cast_model_to_bf16( in_var = block.var(in_var_name) except ValueError as e: _logger.debug( - "-- {}, try to get it in the global block --".format( - e - ) + f"-- {e}, try to get it in the global block --" ) in_var = global_block.var(in_var_name) if in_var is not None: _logger.debug( - "-- var {} is got in the global block --".format( - in_var_name - ) + f"-- var {in_var_name} is got in the global block --" ) if in_var is None or in_var.type not in _valid_types: @@ -379,16 +375,12 @@ def cast_model_to_bf16( out_var = block.var(out_var_name) except ValueError as e: _logger.debug( - "-- {}, try to get it in the global block --".format( - e - ) + f"-- {e}, try to get it in the global block --" ) out_var = global_block.var(out_var_name) if out_var is not None: _logger.debug( - "-- var {} is got in the global block --".format( - out_var_name - ) + f"-- var {out_var_name} is got in the global block --" ) if out_var is None or out_var.type not in _valid_types: diff --git a/python/paddle/static/amp/fp16_utils.py b/python/paddle/static/amp/fp16_utils.py index 5119425348b42..77a522b0c293b 100644 --- a/python/paddle/static/amp/fp16_utils.py +++ b/python/paddle/static/amp/fp16_utils.py @@ -492,9 +492,7 @@ def get_promote_dtype(op, amp_dtype, block): # for ipu, all inputs must be converted to fp16 if not core.is_compiled_with_ipu() and _keep_fp32_input(op, in_name): _logger.debug( - "---- Input {} {} should be kept fp32 ----".format( - in_name, op.input(in_name) - ) + f"---- Input {in_name} {op.input(in_name)} should be kept fp32 ----" ) continue # if this op has inputs diff --git a/python/paddle/static/io.py b/python/paddle/static/io.py index 27682416f8c1b..8f68f3f9e89bf 100644 --- a/python/paddle/static/io.py +++ b/python/paddle/static/io.py @@ -1304,9 +1304,7 @@ def load_vars( var_path = os.path.join(dirname, new_var.name) if not os.path.exists(var_path): raise ValueError( - "SelectedRows var {} can not find at {}".format( - new_var.name, var_path - ) + f"SelectedRows var {new_var.name} can not find at {var_path}" ) if os.path.isfile(var_path): @@ -1441,9 +1439,7 @@ def save(program, model_path, protocol=4, **configs): if not isinstance(protocol, int): raise ValueError( - "The 'protocol' MUST be `int`, but received {}".format( - type(protocol) - ) + f"The 'protocol' MUST be `int`, but received {type(protocol)}" ) if protocol < 2 or protocol > 4: @@ -1677,9 +1673,7 @@ def set_var(var, ndarray): for v in parameter_list: assert ( v.name in load_dict - ), "Can not find [{}] in model file [{}]".format( - v.name, parameter_file_name - ) + ), f"Can not find [{v.name}] in 
model file [{parameter_file_name}]" set_var(v, load_dict[v.name]) optimizer_var_list = list( @@ -1702,9 +1696,7 @@ def set_var(var, ndarray): for v in optimizer_var_list: assert ( v.name in load_dict - ), "Can not find [{}] in model file [{}]".format( - v.name, opt_file_name - ) + ), f"Can not find [{v.name}] in model file [{opt_file_name}]" set_var(v, load_dict[v.name]) @@ -1753,9 +1745,7 @@ def set_program_state(program, state_dict): var_temp = paddle.base.global_scope().find_var(para.name) assert ( var_temp is not None - ), "Variable [ {} ] Not found, Please make sure run startup program".format( - para.name - ) + ), f"Variable [ {para.name} ] Not found, Please make sure run startup program" if para.name in state_dict: # set value from state dict orig_para_np = np.array(var_temp.get_tensor()) diff --git a/python/paddle/static/nn/common.py b/python/paddle/static/nn/common.py index e0c189b11e356..21b5b56729b86 100644 --- a/python/paddle/static/nn/common.py +++ b/python/paddle/static/nn/common.py @@ -901,8 +901,7 @@ def conv2d( ) if len(input.shape) != 4: raise ValueError( - "Input size should be 4, " - "but received {}".format(len(input.shape)) + "Input size should be 4, " f"but received {len(input.shape)}" ) num_channels = input.shape[1] if not isinstance(use_cudnn, bool): @@ -931,7 +930,7 @@ def conv2d( elif groups <= 0: raise ValueError( "the groups of input must be greater than 0, " - "but received the groups of input is {}".format(groups) + f"but received the groups of input is {groups}" ) else: if num_channels % groups != 0: @@ -1020,8 +1019,8 @@ def _get_default_param_initializer(): if filter_elem_num <= 0: raise ValueError( "Invalid filter number, excepted number is larger than 0, but" - " received {}, please check the input shape and " - "filter size.".format(filter_elem_num) + f" received {filter_elem_num}, please check the input shape and " + "filter size." ) std = (2.0 / filter_elem_num) ** 0.5 return Normal(0.0, std) @@ -1246,9 +1245,7 @@ def conv3d( num_filter_channels = num_channels elif groups <= 0: raise ValueError( - "the groups of conv3d should be greater than 0. Received groups: {}".format( - groups - ) + f"the groups of conv3d should be greater than 0. Received groups: {groups}" ) else: if num_channels % groups != 0: @@ -1325,8 +1322,8 @@ def _get_default_param_initializer(): if filter_elem_num <= 0: raise ValueError( "Invalid filter number, excepted number is larger than 0, but" - " received {}, please check the input shape and " - "filter size.".format(filter_elem_num) + f" received {filter_elem_num}, please check the input shape and " + "filter size." ) std = (2.0 / filter_elem_num) ** 0.5 @@ -1554,8 +1551,7 @@ def conv2d_transpose( ), "param_attr should not be False in conv2d_transpose." 
if len(input.shape) != 4: raise ValueError( - "Input size should be 4, " - "but received {}".format(len(input.shape)) + "Input size should be 4, " f"but received {len(input.shape)}" ) if num_filters == 0: @@ -1712,7 +1708,7 @@ def _update_padding(padding, data_format): elif groups <= 0: raise ValueError( "the groups of input must be greater than 0, " - "but received the groups of input is {}".format(groups) + f"but received the groups of input is {groups}" ) filter_shape = [input_channel, num_filters // groups] + filter_size @@ -2075,9 +2071,7 @@ def _update_padding(padding, data_format): if num_filters % groups != 0: raise ValueError( "Attr(num_filters) must be divisible by groups," - "Received: Attr(num_filters) is {}, the groups is {}".format( - num_filters, groups - ) + f"Received: Attr(num_filters) is {num_filters}, the groups is {groups}" ) filter_shape = [input_channel, num_filters // groups] + filter_size @@ -2303,8 +2297,8 @@ def _get_default_param_initializer(): if filter_elem_num <= 0: raise ValueError( "Invalid filter number, excepted number is larger than 0, but" - " received {}, please check the input shape and " - "filter size.".format(filter_elem_num) + f" received {filter_elem_num}, please check the input shape and " + "filter size." ) std = (2.0 / filter_elem_num) ** 0.5 return paddle.nn.initializer.normal.Normal(0.0, std) @@ -3010,7 +3004,7 @@ def prelu(x, mode, param_attr=None, data_format="NCHW", name=None): if data_format not in true_data_format: raise ValueError( "data_format must be one of 'NC', 'NCL', 'NCHW', 'NCDHW', " - "'NLC', 'NHWC', 'NDHWC' but receive {}".format(data_format) + f"'NLC', 'NHWC', 'NDHWC' but receive {data_format}" ) data_format = 'NCHW' if data_format[1] == 'C' else 'NHWC' @@ -3324,9 +3318,7 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None): for v in skip_vars_in_backward_input: if v.name not in fwd_in_out: raise ValueError( - 'Tensor {} is not found in forward inputs and outputs'.format( - v.name - ) + f'Tensor {v.name} is not found in forward inputs and outputs' ) backward_skip_vars.add(v.name) diff --git a/python/paddle/static/nn/control_flow.py b/python/paddle/static/nn/control_flow.py index 87f9ae321d6f8..0d0afcc71c150 100644 --- a/python/paddle/static/nn/control_flow.py +++ b/python/paddle/static/nn/control_flow.py @@ -750,14 +750,8 @@ def create_var_like(o_var): def _error_message(what, arg_name, op_name, right_value, error_value): error_message = ( - "{what} of '{arg_name}' in {op_name} must be " - "{right_value}, but received: {error_value}.".format( - what=what, - arg_name=arg_name, - op_name=op_name, - right_value=right_value, - error_value=error_value, - ) + f"{what} of '{arg_name}' in {op_name} must be " + f"{right_value}, but received: {error_value}." ) return error_message @@ -871,8 +865,8 @@ def _case_check_args(pred_fn_pairs, default): if not callable(fn): raise TypeError( - "The fn for {} of pred_fn_pairs in Op(case) must" - " be callable.".format(pred.name) + f"The fn for {pred.name} of pred_fn_pairs in Op(case) must" + " be callable." 
) if default is None: diff --git a/python/paddle/static/quantization/quantization_pass.py b/python/paddle/static/quantization/quantization_pass.py index 4387732362f9a..ac318440aca57 100644 --- a/python/paddle/static/quantization/quantization_pass.py +++ b/python/paddle/static/quantization/quantization_pass.py @@ -1796,9 +1796,7 @@ def apply(self, graph): scale_var = self._scope.find_var(scale_name) assert ( scale_var is not None - ), "Can not find {} variable in the scope".format( - scale_name - ) + ), f"Can not find {scale_name} variable in the scope" scale_value = np.array(scale_var.get_tensor())[0] # For compatibility, we save output threshold by two methods. diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py index 33c46f98ef3cf..286dcd261d8fe 100644 --- a/python/paddle/tensor/linalg.py +++ b/python/paddle/tensor/linalg.py @@ -107,10 +107,8 @@ def transpose(x, perm, name=None): raise ValueError( "Input(perm) is the permutation of dimensions of Input(x), " "its length should be equal to dimensions of Input(x), " - "but received dimension of Input(x) is {}, " - "the length of Input(perm) is {}.".format( - len(x.shape), len(perm) - ) + f"but received dimension of Input(x) is {len(x.shape)}, " + f"the length of Input(perm) is {len(perm)}." ) for idx, dim in enumerate(perm): if dim >= len(x.shape): @@ -594,9 +592,7 @@ def p_matrix_norm(input, porder=1.0, axis=axis, keepdim=False, name=None): ) else: raise ValueError( - "unspport p for p-order vector norm. except float, found {}".format( - p - ) + f"unspport p for p-order vector norm. except float, found {p}" ) # calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: @@ -616,9 +612,7 @@ def p_matrix_norm(input, porder=1.0, axis=axis, keepdim=False, name=None): ) else: raise ValueError( - "except axis type int or list (length of list <=2), found {}".format( - axis - ) + f"except axis type int or list (length of list <=2), found {axis}" ) @@ -1255,7 +1249,7 @@ def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " - "min of Input(fweights) is {}.".format(fweights.min()) + f"min of Input(fweights) is {fweights.min()}." ) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") @@ -1280,7 +1274,7 @@ def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " - "min of Input(aweights) is {}.".format(aweights.min()) + f"min of Input(aweights) is {aweights.min()}." ) if w is not None: w = w * aw @@ -1834,9 +1828,7 @@ def __check_input(x, vec): vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( - "x should be 2-dimensional. But received x's dimention: {}".format( - x_shape - ) + f"x should be 2-dimensional. 
But received x's dimention: {x_shape}" ) if len(vec_shape) != 1: raise ValueError( @@ -2192,8 +2184,8 @@ def svd_lowrank(x, q=6, niter=2, M=None): q = min(6, m, n) elif not (q >= 0 and q <= min(m, n)): raise ValueError( - 'q(={}) must be non-negative integer' - ' and not greater than min(m, n)={}'.format(q, min(m, n)) + f'q(={q}) must be non-negative integer' + f' and not greater than min(m, n)={min(m, n)}' ) if not (niter >= 0): raise ValueError(f'niter(={niter}) must be non-negative integer') @@ -3669,8 +3661,8 @@ def cdist( ) assert x_shape[-1] == y_shape[-1], ( "The x and y must have same last dimension, " - "But received Input x's last dimension is {}, " - "Input y's last dimension is {}.\n".format(x_shape[-1], y_shape[-1]) + f"But received Input x's last dimension is {x_shape[-1]}, " + f"Input y's last dimension is {y_shape[-1]}.\n" ) assert p >= 0, ( "The p must be greater than or equal to 0, " diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index 2e366150d3632..777a47968e591 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -330,9 +330,7 @@ def slice(input, axes, starts, ends): else: raise ValueError( - "Input axes must be a python list or tuple, but reveived {}".format( - type(axes) - ) + f"Input axes must be a python list or tuple, but reveived {type(axes)}" ) infer_flags = [1 for i in range(len(axes))] @@ -502,10 +500,8 @@ def transpose(x, perm, name=None): raise ValueError( "Input(perm) is the permutation of dimensions of Input(x), " "its length should be equal to dimensions of Input(x), " - "but received dimension of Input(x) is {}, " - "the length of Input(perm) is {}.".format( - len(x.shape), len(perm) - ) + f"but received dimension of Input(x) is {len(x.shape)}, " + f"the length of Input(perm) is {len(perm)}." 
) for idx, dim in enumerate(perm): if dim >= len(x.shape): @@ -553,9 +549,7 @@ def unstack(x, axis=0, num=None): """ if not (-x.ndim <= axis < x.ndim): - raise ValueError( - '`axis` must be in the range [-{0}, {0})'.format(x.ndim) - ) + raise ValueError(f'`axis` must be in the range [-{x.ndim}, {x.ndim})') if num is not None and (num < 0 or num > x.shape[axis]): raise ValueError(f'`num` must be in the range [0, {x.shape[axis]})') if in_dynamic_mode(): @@ -1451,15 +1445,11 @@ def rot90(x, k=1, axes=[0, 1], name=None): total_rot_dims = len(axes) if total_rot_dims != 2: raise ValueError( - "expected total rotation axes == 2, but got axes = {}".format( - total_rot_dims - ) + f"expected total rotation axes == 2, but got axes = {total_rot_dims}" ) if input_total_dims < 2: raise ValueError( - "expected total dims >= 2, but got total dims = {}".format( - input_total_dims - ) + f"expected total dims >= 2, but got total dims = {input_total_dims}" ) if not (axes[0] != axes[1] and abs(axes[0] - axes[1]) != input_total_dims): @@ -2130,9 +2120,7 @@ def vsplit(x, num_or_sections, name=None): """ if x.ndim < 2: raise ValueError( - "The input tensor's dimension must be greater than 1, but got {}".format( - x.ndim - ) + f"The input tensor's dimension must be greater than 1, but got {x.ndim}" ) return split(x, num_or_sections, axis=0, name=name) @@ -3720,7 +3708,7 @@ def reshape_(x, shape, name=None): else: raise ValueError( "shape must be an instance of `list`, `tuple` or `Variable`," - " got '{}.'".format(type(shape)) + f" got '{type(shape)}.'" ) return out @@ -4480,12 +4468,12 @@ def moveaxis(x, source, destination, name=None): if axis[0] < 0: assert ( axis[0] >= -ndim - ), "'source' must be in the range of [-{0}, {0})".format(ndim) + ), f"'source' must be in the range of [-{ndim}, {ndim})" src[i] += ndim else: assert ( axis[0] < ndim - ), "'source' must be in the range of [-{0}, {0})".format(ndim) + ), f"'source' must be in the range of [-{ndim}, {ndim})" assert isinstance( axis[1], int @@ -4493,12 +4481,12 @@ def moveaxis(x, source, destination, name=None): if axis[1] < 0: assert ( axis[1] >= -ndim - ), "'source' must be in the range of [-{0}, {0})".format(ndim) + ), f"'source' must be in the range of [-{ndim}, {ndim})" dst[i] += ndim else: assert ( axis[1] < ndim - ), "'source' must be in the range of [-{0}, {0})".format(ndim) + ), f"'source' must be in the range of [-{ndim}, {ndim})" perm[dst[i]] = src[i] src_dims.remove(src[i]) dst_dims.remove(dst[i]) @@ -4541,13 +4529,11 @@ def moveaxis(x, source, destination, name=None): def non_negative_axis(arr, axis): ndim = len(arr.shape) if axis >= 0: - assert ( - axis < ndim - ), "'axis' must be in the range of [-{0}, {0})".format(ndim) + assert axis < ndim, f"'axis' must be in the range of [-{ndim}, {ndim})" else: assert ( axis >= -ndim - ), "'axis' must be in the range of [-{0}, {0})".format(ndim) + ), f"'axis' must be in the range of [-{ndim}, {ndim})" axis += ndim return axis diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index ce359b732e2c0..d772c890fbb8c 100644 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -5770,9 +5770,7 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None): """ if n < 1: raise ValueError( - "Diff expects input to be at least one-dimensional but got {}".format( - n - ) + f"Diff expects input to be at least one-dimensional but got {n}" ) def _diff_handler(x, n=1, axis=-1, prepend=None, append=None, name=None): @@ -6224,17 +6222,13 @@ def take(x, index, mode='raise', 
name=None): """ if mode not in ['raise', 'wrap', 'clip']: raise ValueError( - "'mode' in 'take' should be 'raise', 'wrap', 'clip', but received {}.".format( - mode - ) + f"'mode' in 'take' should be 'raise', 'wrap', 'clip', but received {mode}." ) if in_dynamic_mode(): if not isinstance(index, (paddle.Tensor, Variable)): raise TypeError( - "The type of 'index' must be Tensor, but got {}".format( - type(index) - ) + f"The type of 'index' must be Tensor, but got {type(index)}" ) if index.dtype not in [paddle.int32, paddle.int64]: raise TypeError( diff --git a/python/paddle/tensor/to_string.py b/python/paddle/tensor/to_string.py index a9b5740b2fc30..6e173545a2767 100644 --- a/python/paddle/tensor/to_string.py +++ b/python/paddle/tensor/to_string.py @@ -126,15 +126,15 @@ def _format_item(np_var, max_width=0, signed=False): or np_var.dtype == np.float16 ): if DEFAULT_PRINT_OPTIONS.sci_mode: - item_str = '{{:.{}e}}'.format( - DEFAULT_PRINT_OPTIONS.precision - ).format(np_var) + item_str = f'{{:.{DEFAULT_PRINT_OPTIONS.precision}e}}'.format( + np_var + ) elif np.ceil(np_var) == np_var: item_str = f'{np_var:.0f}.' else: - item_str = '{{:.{}f}}'.format( - DEFAULT_PRINT_OPTIONS.precision - ).format(np_var) + item_str = f'{{:.{DEFAULT_PRINT_OPTIONS.precision}f}}'.format( + np_var + ) else: item_str = f'{np_var}' diff --git a/python/paddle/utils/cpp_extension/cpp_extension.py b/python/paddle/utils/cpp_extension/cpp_extension.py index 4ea6c9ad591d6..ddf69e9fa373b 100644 --- a/python/paddle/utils/cpp_extension/cpp_extension.py +++ b/python/paddle/utils/cpp_extension/cpp_extension.py @@ -650,9 +650,7 @@ def get_ext_filename(self, fullname): if self.no_python_abi_suffix: assert ( len(name_items) > 2 - ), "Expected len(name_items) > 2, but received {}".format( - len(name_items) - ) + ), f"Expected len(name_items) > 2, but received {len(name_items)}" name_items.pop(-2) ext_name = split_str.join(name_items) @@ -909,9 +907,7 @@ def load( extra_cuda_cflags = [] assert isinstance( extra_cxx_cflags, list - ), "Required type(extra_cxx_cflags) == list[str], but received {}".format( - extra_cxx_cflags - ) + ), f"Required type(extra_cxx_cflags) == list[str], but received {extra_cxx_cflags}" assert isinstance( extra_cuda_cflags, list ), "Required type(extra_cuda_cflags) == list[str], but received {}".format( diff --git a/python/paddle/utils/cpp_extension/extension_utils.py b/python/paddle/utils/cpp_extension/extension_utils.py index df2fd45b345ea..cb50f73d8d9b5 100644 --- a/python/paddle/utils/cpp_extension/extension_utils.py +++ b/python/paddle/utils/cpp_extension/extension_utils.py @@ -497,8 +497,8 @@ def _reset_so_rpath(so_path): if OS_NAME.startswith("darwin"): origin_runtime_path = "@loader_path/../libs/" rpath = f"@rpath/{_get_core_name()}" - cmd = 'install_name_tool -change {} {} {}'.format( - origin_runtime_path, rpath, so_path + cmd = ( + f'install_name_tool -change {origin_runtime_path} {rpath} {so_path}' ) run_cmd(cmd) @@ -512,9 +512,9 @@ def _get_include_dirs_when_compiling(compile_dir): include_dirs_file = 'includes.txt' path = os.path.abspath(compile_dir) include_dirs_file = os.path.join(path, include_dirs_file) - assert os.path.isfile(include_dirs_file), "File {} does not exist".format( + assert os.path.isfile( include_dirs_file - ) + ), f"File {include_dirs_file} does not exist" with open(include_dirs_file, 'r') as f: include_dirs = [line.strip() for line in f.readlines() if line.strip()] @@ -1314,27 +1314,19 @@ def _jit_compile(file_path, verbose=False): py_version = 
subprocess.check_output([interpreter, '-V']) py_version = py_version.decode() log_v( - "Using Python interpreter: {}, version: {}".format( - interpreter, py_version.strip() - ), + f"Using Python interpreter: {interpreter}, version: {py_version.strip()}", verbose, ) except Exception: _, error, _ = sys.exc_info() raise RuntimeError( - 'Failed to check Python interpreter with `{}`, errors: {}'.format( - interpreter, error - ) + f'Failed to check Python interpreter with `{interpreter}`, errors: {error}' ) if IS_WINDOWS: - compile_cmd = 'cd /d {} && {} {} build'.format( - ext_dir, interpreter, setup_file - ) + compile_cmd = f'cd /d {ext_dir} && {interpreter} {setup_file} build' else: - compile_cmd = 'cd {} && {} {} build'.format( - ext_dir, interpreter, setup_file - ) + compile_cmd = f'cd {ext_dir} && {interpreter} {setup_file} build' print("Compiling user custom op, it will cost a few seconds.....") run_cmd(compile_cmd, verbose) @@ -1437,9 +1429,7 @@ def check_abi_compatibility(compiler, verbose=False): # check compiler version failed _, error, _ = sys.exc_info() warnings.warn( - 'Failed to check compiler version for {}: {}'.format( - compiler, error - ) + f'Failed to check compiler version for {compiler}: {error}' ) return False diff --git a/python/paddle/utils/deprecated.py b/python/paddle/utils/deprecated.py index c482484dce6ab..873c6b3a6a9fc 100755 --- a/python/paddle/utils/deprecated.py +++ b/python/paddle/utils/deprecated.py @@ -55,7 +55,7 @@ def decorator(func): assert isinstance(reason, str), 'type of "reason" must be str.' assert isinstance(level, int) and level >= 0 and level < 3, ( 'type of "level" must be int and must be one of 0, 1, 2. But ' - 'received: {}.'.format(level) + f'received: {level}.' ) _since = since.strip() @@ -92,9 +92,7 @@ def wrapper(*args, **kwargs): if level == 2: raise RuntimeError( - 'API "{}.{}" has been deprecated.'.format( - func.__module__, func.__name__ - ) + f'API "{func.__module__}.{func.__name__}" has been deprecated.' ) warningmsg = "\033[93m\nWarning:\n%s \033[0m" % (msg) diff --git a/python/paddle/utils/dlpack.py b/python/paddle/utils/dlpack.py index ed196beedb356..3e291b438502f 100644 --- a/python/paddle/utils/dlpack.py +++ b/python/paddle/utils/dlpack.py @@ -53,7 +53,7 @@ def to_dlpack(x): if not isinstance(x, (paddle.Tensor, paddle.base.core.eager.Tensor)): raise TypeError( "The type of 'x' in to_dlpack must be paddle.Tensor," - " but received {}.".format(type(x)) + f" but received {type(x)}." ) return x.value().get_tensor()._to_dlpack() @@ -94,7 +94,7 @@ def from_dlpack(dlpack): if not dlpack_flag: raise TypeError( "The type of 'dlpack' in from_dlpack must be PyCapsule object," - " but received {}.".format(type(dlpack)) + f" but received {type(dlpack)}." ) if in_dygraph_mode(): diff --git a/python/paddle/utils/download.py b/python/paddle/utils/download.py index 95fb9b539a7a5..59efb656f6691 100644 --- a/python/paddle/utils/download.py +++ b/python/paddle/utils/download.py @@ -38,9 +38,7 @@ def update(self, n): if self.total is None: sys.stderr.write(f"\r{self.n:.1f} bytes") else: - sys.stderr.write( - "\r{:.1f}%".format(100 * self.n / float(self.total)) - ) + sys.stderr.write(f"\r{100 * self.n / float(self.total):.1f}%") sys.stderr.flush() def __enter__(self): @@ -172,8 +170,8 @@ def _get_download(url, fullname): ) as req: if req.status_code != 200: raise RuntimeError( - "Downloading from {} failed with code " - "{}!".format(url, req.status_code) + f"Downloading from {url} failed with code " + f"{req.status_code}!" 
) tmp_fullname = fullname + "_tmp" @@ -193,9 +191,7 @@ def _get_download(url, fullname): except Exception as e: # requests.exceptions.ConnectionError logger.info( - "Downloading {} from {} failed with exception {}".format( - fname, url, str(e) - ) + f"Downloading {fname} from {url} failed with exception {str(e)}" ) return False @@ -204,9 +200,7 @@ def _wget_download(url, fullname): # using wget to download url tmp_fullname = fullname + "_tmp" # –user-agent - command = 'wget -O {} -t {} {}'.format( - tmp_fullname, DOWNLOAD_RETRY_LIMIT, url - ) + command = f'wget -O {tmp_fullname} -t {DOWNLOAD_RETRY_LIMIT} {url}' subprc = subprocess.Popen( command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) @@ -214,9 +208,7 @@ def _wget_download(url, fullname): if subprc.returncode != 0: raise RuntimeError( - '{} failed. Please make sure `wget` is installed or {} exists'.format( - command, url - ) + f'{command} failed. Please make sure `wget` is installed or {url} exists' ) shutil.move(tmp_fullname, fullname) @@ -240,9 +232,7 @@ def _download(url, path, md5sum=None, method='get'): method (str): which download method to use. Support `wget` and `get`. Default is `get`. """ - assert method in _download_methods, 'make sure `{}` implemented'.format( - method - ) + assert method in _download_methods, f'make sure `{method}` implemented' if not osp.exists(path): os.makedirs(path) @@ -258,7 +248,7 @@ def _download(url, path, md5sum=None, method='get'): retry_cnt += 1 else: raise RuntimeError( - "Download from {} failed. " "Retry limit reached".format(url) + f"Download from {url} failed. " "Retry limit reached" ) if not _download_methods[method](url, fullname): @@ -281,8 +271,8 @@ def _md5check(fullname, md5sum=None): if calc_md5sum != md5sum: logger.info( - "File {} md5 check failed, {}(calc) != " - "{}(base)".format(fullname, calc_md5sum, md5sum) + f"File {fullname} md5 check failed, {calc_md5sum}(calc) != " + f"{md5sum}(base)" ) return False return True diff --git a/python/paddle/utils/install_check.py b/python/paddle/utils/install_check.py index 184b49c05d2c4..4974eddbfa26c 100644 --- a/python/paddle/utils/install_check.py +++ b/python/paddle/utils/install_check.py @@ -60,7 +60,7 @@ def _is_cuda_available(): logging.warning( "You are using GPU version PaddlePaddle, but there is no GPU " "detected on your machine. Maybe CUDA devices is not set properly." - "\n Original Error is {}".format(e) + f"\n Original Error is {e}" ) return False @@ -76,7 +76,7 @@ def _is_xpu_available(): logging.warning( "You are using XPU version PaddlePaddle, but there is no XPU " "detected on your machine. Maybe XPU devices is not set properly." - "\n Original Error is {}".format(e) + f"\n Original Error is {e}" ) return False @@ -281,11 +281,7 @@ def run_check(): os.environ['PADDLE_DISTRI_BACKEND'] = "xccl" _run_parallel(device_list) - print( - "PaddlePaddle works well on {} {}s.".format( - device_count, device_str - ) - ) + print(f"PaddlePaddle works well on {device_count} {device_str}s.") print( "PaddlePaddle is installed successfully! Let's start deep learning with PaddlePaddle now." ) @@ -302,9 +298,7 @@ def run_check(): logging.warning(f"\n Original Error is: {e}") print( - "PaddlePaddle is installed successfully ONLY for single {}! " - "Let's start deep learning with PaddlePaddle now.".format( - device_str - ) + f"PaddlePaddle is installed successfully ONLY for single {device_str}! " + "Let's start deep learning with PaddlePaddle now." 
) raise e diff --git a/python/paddle/utils/layers_utils.py b/python/paddle/utils/layers_utils.py index 4b8da4ee84249..e90273f3e82ad 100644 --- a/python/paddle/utils/layers_utils.py +++ b/python/paddle/utils/layers_utils.py @@ -289,7 +289,7 @@ def _recursive_assert_same_structure(nest1, nest2, check_types): if is_sequence_nest1 != is_sequence(nest2): raise ValueError( "The two structures don't have the same nested structure.\n\n" - "First structure: {}\n\nSecond structure: {}.".format(nest1, nest2) + f"First structure: {nest1}\n\nSecond structure: {nest2}." ) if not is_sequence_nest1: return # finished checking diff --git a/python/paddle/vision/datasets/cifar.py b/python/paddle/vision/datasets/cifar.py index a787f1479a28a..b8f40a232d70a 100644 --- a/python/paddle/vision/datasets/cifar.py +++ b/python/paddle/vision/datasets/cifar.py @@ -121,9 +121,7 @@ def __init__( backend = paddle.vision.get_image_backend() if backend not in ['pil', 'cv2']: raise ValueError( - "Expected backend are one of ['pil', 'cv2'], but got {}".format( - backend - ) + f"Expected backend are one of ['pil', 'cv2'], but got {backend}" ) self.backend = backend diff --git a/python/paddle/vision/datasets/flowers.py b/python/paddle/vision/datasets/flowers.py index 94ff24850a8ef..07926d3b9a10b 100644 --- a/python/paddle/vision/datasets/flowers.py +++ b/python/paddle/vision/datasets/flowers.py @@ -125,9 +125,7 @@ def __init__( backend = paddle.vision.get_image_backend() if backend not in ['pil', 'cv2']: raise ValueError( - "Expected backend are one of ['pil', 'cv2'], but got {}".format( - backend - ) + f"Expected backend are one of ['pil', 'cv2'], but got {backend}" ) self.backend = backend diff --git a/python/paddle/vision/datasets/mnist.py b/python/paddle/vision/datasets/mnist.py index bfd20b66c8df9..c44d86ac771d6 100644 --- a/python/paddle/vision/datasets/mnist.py +++ b/python/paddle/vision/datasets/mnist.py @@ -119,9 +119,7 @@ def __init__( backend = paddle.vision.get_image_backend() if backend not in ['pil', 'cv2']: raise ValueError( - "Expected backend are one of ['pil', 'cv2'], but got {}".format( - backend - ) + f"Expected backend are one of ['pil', 'cv2'], but got {backend}" ) self.backend = backend diff --git a/python/paddle/vision/datasets/voc2012.py b/python/paddle/vision/datasets/voc2012.py index 18be80408368f..f421137c7e980 100644 --- a/python/paddle/vision/datasets/voc2012.py +++ b/python/paddle/vision/datasets/voc2012.py @@ -122,9 +122,7 @@ def __init__( backend = paddle.vision.get_image_backend() if backend not in ['pil', 'cv2']: raise ValueError( - "Expected backend are one of ['pil', 'cv2'], but got {}".format( - backend - ) + f"Expected backend are one of ['pil', 'cv2'], but got {backend}" ) self.backend = backend diff --git a/python/paddle/vision/image.py b/python/paddle/vision/image.py index e8dfd1d801355..fd46ba2953216 100644 --- a/python/paddle/vision/image.py +++ b/python/paddle/vision/image.py @@ -83,9 +83,7 @@ def set_image_backend(backend): global _image_backend if backend not in ['pil', 'cv2', 'tensor']: raise ValueError( - "Expected backend are one of ['pil', 'cv2', 'tensor'], but got {}".format( - backend - ) + f"Expected backend are one of ['pil', 'cv2', 'tensor'], but got {backend}" ) _image_backend = backend @@ -156,9 +154,7 @@ def image_load(path, backend=None): backend = _image_backend if backend not in ['pil', 'cv2', 'tensor']: raise ValueError( - "Expected backend are one of ['pil', 'cv2', 'tensor'], but got {}".format( - backend - ) + f"Expected backend are one of ['pil', 'cv2', 
'tensor'], but got {backend}" ) if backend == 'pil': diff --git a/python/paddle/vision/models/densenet.py b/python/paddle/vision/models/densenet.py index 90346d4ff7493..f3332fabe3f7e 100644 --- a/python/paddle/vision/models/densenet.py +++ b/python/paddle/vision/models/densenet.py @@ -245,9 +245,7 @@ def __init__( supported_layers = [121, 161, 169, 201, 264] assert ( layers in supported_layers - ), "supported layers are {} but input layer is {}".format( - supported_layers, layers - ) + ), f"supported layers are {supported_layers} but input layer is {layers}" densenet_spec = { 121: (64, 32, [6, 12, 24, 16]), 161: (96, 48, [6, 12, 36, 24]), diff --git a/python/paddle/vision/models/mobilenetv3.py b/python/paddle/vision/models/mobilenetv3.py index a35058c9243f0..8c392ee5a696b 100644 --- a/python/paddle/vision/models/mobilenetv3.py +++ b/python/paddle/vision/models/mobilenetv3.py @@ -102,9 +102,7 @@ def __init__( self.activation_layer = nn.Hardswish else: raise RuntimeError( - "The activation function is not supported: {}".format( - activation - ) + f"The activation function is not supported: {activation}" ) self.stride = stride diff --git a/python/paddle/vision/models/squeezenet.py b/python/paddle/vision/models/squeezenet.py index 9133a19993421..5e18a0b6ec459 100644 --- a/python/paddle/vision/models/squeezenet.py +++ b/python/paddle/vision/models/squeezenet.py @@ -115,9 +115,7 @@ def __init__(self, version, num_classes=1000, with_pool=True): supported_versions = ['1.0', '1.1'] assert ( version in supported_versions - ), "supported versions are {} but input version is {}".format( - supported_versions, version - ) + ), f"supported versions are {supported_versions} but input version is {version}" if self.version == "1.0": self._conv = Conv2D( diff --git a/python/paddle/vision/transforms/functional_tensor.py b/python/paddle/vision/transforms/functional_tensor.py index c22ff07e922ed..06cd33419b65b 100644 --- a/python/paddle/vision/transforms/functional_tensor.py +++ b/python/paddle/vision/transforms/functional_tensor.py @@ -803,9 +803,7 @@ def resize(img, size, interpolation='bilinear', data_format='CHW'): # We should consider to support this case in future. if w <= 0 or h <= 0: raise NotImplementedError( - "Not support while w<=0 or h<=0, but received w={}, h={}".format( - w, h - ) + f"Not support while w<=0 or h<=0, but received w={w}, h={h}" ) if (w <= h and w == size) or (h <= w and h == size): return img diff --git a/python/paddle/vision/transforms/transforms.py b/python/paddle/vision/transforms/transforms.py index eebccb4c8decf..23f4645f9e2ed 100644 --- a/python/paddle/vision/transforms/transforms.py +++ b/python/paddle/vision/transforms/transforms.py @@ -53,9 +53,7 @@ def _check_input( if isinstance(value, numbers.Number): if value < 0: raise ValueError( - "If {} is a single number, it must be non negative.".format( - name - ) + f"If {name} is a single number, it must be non negative." ) value = [center - value, center + value] if clip_first_on_zero: @@ -65,9 +63,7 @@ def _check_input( raise ValueError(f"{name} values should be between {bound}") else: raise TypeError( - "{} should be a single number or a list/tuple with lenght 2.".format( - name - ) + f"{name} should be a single number or a list/tuple with lenght 2." 
) if value[0] == value[1] == center: @@ -113,8 +109,8 @@ def __call__(self, data): except Exception as e: stack_info = traceback.format_exc() print( - "fail to perform transform [{}] with error: " - "{} and stack:\n{}".format(f, e, str(stack_info)) + f"fail to perform transform [{f}] with error: " + f"{e} and stack:\n{str(stack_info)}" ) raise e return data diff --git a/test/book/test_word2vec_book.py b/test/book/test_word2vec_book.py index bfa4a05b5e160..3c8a879bbc74b 100644 --- a/test/book/test_word2vec_book.py +++ b/test/book/test_word2vec_book.py @@ -35,9 +35,7 @@ def get_place(target): return base.CPUPlace() else: raise ValueError( - "Target `{}` is not on the support list: `cuda`, `xpu` and `cpu`.".format( - target - ) + f"Target `{target}` is not on the support list: `cuda`, `xpu` and `cpu`." ) diff --git a/test/cinn/op_mappers/op_mapper_test.py b/test/cinn/op_mappers/op_mapper_test.py index 584c26488484a..d77a1b4dc7bf0 100644 --- a/test/cinn/op_mappers/op_mapper_test.py +++ b/test/cinn/op_mappers/op_mapper_test.py @@ -166,16 +166,12 @@ def __check_valid(self): self.assertEqual( var.shape, self.feed_data[name].shape, - msg="The shape of input {} in feed_data is error".format( - var.name - ), + msg=f"The shape of input {var.name} in feed_data is error", ) self.assertEqual( self.paddleddtype2nptype(var.dtype), str(self.feed_data[name].dtype), - msg="The dtype of input {} in feed_data is error".format( - var.name - ), + msg=f"The dtype of input {var.name} in feed_data is error", ) for out_name, in_name in self.inplace_outputs.items(): diff --git a/test/cinn/passes/pass_test.py b/test/cinn/passes/pass_test.py index 099d5b2a07fec..b8a64ce00963d 100644 --- a/test/cinn/passes/pass_test.py +++ b/test/cinn/passes/pass_test.py @@ -64,9 +64,7 @@ def get_pass_outputs(self, passes): self.assertIn( var.name(), self.feed_data, - msg="Cannot found input data {} in self.feed_data".format( - var.name() - ), + msg=f"Cannot found input data {var.name()} in self.feed_data", ) feed_list.append(self.feed_data[var.name()]) @@ -95,9 +93,7 @@ def check_pass_outputs( logger.debug(f"Pass after base pass optimize has {base_pass_size} ops") test_pass_size = self.get_pass_size(base_passes + test_passes) logger.debug( - "Pass after base and test pass optimize has {} ops".format( - test_pass_size - ) + f"Pass after base and test pass optimize has {test_pass_size} ops" ) self.assertEqual( base_pass_size - test_pass_size, diff --git a/test/cinn/test_paddle_model_convertor.py b/test/cinn/test_paddle_model_convertor.py index 5e696785fb50f..b143c2dff1c9e 100644 --- a/test/cinn/test_paddle_model_convertor.py +++ b/test/cinn/test_paddle_model_convertor.py @@ -166,9 +166,7 @@ def load_paddle_program(self): logger.debug(msg=f"Param List: {self.param_vars.keys()}") logger.debug(msg=f"Feed List: {self.feed_names}") logger.debug( - msg="Fetch List: {}".format( - [var.name for var in self.fetch_targets] - ) + msg=f"Fetch List: {[var.name for var in self.fetch_targets]}" ) self.feed_shapes = [] diff --git a/test/collective/fleet/parallel_dygraph_se_resnext.py b/test/collective/fleet/parallel_dygraph_se_resnext.py index e9ee2407a3346..eaf8505360fb2 100644 --- a/test/collective/fleet/parallel_dygraph_se_resnext.py +++ b/test/collective/fleet/parallel_dygraph_se_resnext.py @@ -215,9 +215,7 @@ def __init__(self, layers=50, class_dim=102): supported_layers = [50, 101, 152] assert ( layers in supported_layers - ), "supported layers are {} but input layer is {}".format( - supported_layers, layers - ) + ), f"supported layers are 
{supported_layers} but input layer is {layers}" if layers == 50: cardinality = 32 diff --git a/test/collective/fleet/test_parallel_dygraph_pp_adaptor.py b/test/collective/fleet/test_parallel_dygraph_pp_adaptor.py index 4551887370b3b..bee93e11115d3 100644 --- a/test/collective/fleet/test_parallel_dygraph_pp_adaptor.py +++ b/test/collective/fleet/test_parallel_dygraph_pp_adaptor.py @@ -64,14 +64,10 @@ def check_converted_model(converted_model_dir, expected_model_dir): # expected model, which does not hinder model recovering for i in range(p_config1.pp): sub_converted_model_dir = ( - "{}/mp_00_sharding_00_pp_{:0>2d}".format( - converted_model_dir, i - ) + f"{converted_model_dir}/mp_00_sharding_00_pp_{i:0>2d}" ) sub_expected_model_dir = ( - "{}/mp_00_sharding_00_pp_{:0>2d}".format( - expected_model_dir, i - ) + f"{expected_model_dir}/mp_00_sharding_00_pp_{i:0>2d}" ) print( f"converted_model_dir: {sub_converted_model_dir}; expected_model_dir: {sub_expected_model_dir}" diff --git a/test/contrib/test_multi_precision_fp16_train.py b/test/contrib/test_multi_precision_fp16_train.py index 137f2269173f3..b85f1547cfe6e 100644 --- a/test/contrib/test_multi_precision_fp16_train.py +++ b/test/contrib/test_multi_precision_fp16_train.py @@ -217,22 +217,14 @@ def do_test(use_nesterov=False, optimizer=""): else: suffix = "with Nesterov" if use_nesterov else "without Nesterov" with self.scope_prog_guard(): - print( - "-----------------FP16 Train {}-----------------".format( - suffix - ) - ) + print(f"-----------------FP16 Train {suffix}-----------------") train_loss_fp16, test_loss_fp16 = train( use_pure_fp16=True, use_nesterov=use_nesterov, optimizer=optimizer, ) with self.scope_prog_guard(): - print( - "-----------------FP32 Train {}-----------------".format( - suffix - ) - ) + print(f"-----------------FP32 Train {suffix}-----------------") train_loss_fp32, test_loss_fp32 = train( use_pure_fp16=False, use_nesterov=use_nesterov, diff --git a/test/cpp/inference/api/full_ILSVRC2012_val_preprocess.py b/test/cpp/inference/api/full_ILSVRC2012_val_preprocess.py index f2dec13f8a3d5..3ebe610ea0a0f 100644 --- a/test/cpp/inference/api/full_ILSVRC2012_val_preprocess.py +++ b/test/cpp/inference/api/full_ILSVRC2012_val_preprocess.py @@ -174,9 +174,7 @@ def run_convert(): retry = retry + 1 else: raise RuntimeError( - "Can not convert the dataset to binary file with try limit {}".format( - try_limit - ) + f"Can not convert the dataset to binary file with try limit {try_limit}" ) download_concat(cache_folder, zip_path) convert_Imagenet_tar2bin(zip_path, output_file) diff --git a/test/cpp_extension/test_cpp_extension_setup.py b/test/cpp_extension/test_cpp_extension_setup.py index 2de2dd80deac9..fae59ea689993 100644 --- a/test/cpp_extension/test_cpp_extension_setup.py +++ b/test/cpp_extension/test_cpp_extension_setup.py @@ -33,9 +33,7 @@ def setUp(self): cur_dir = os.path.dirname(os.path.abspath(__file__)) # install general extension # compile, install the custom op egg into site-packages under background - cmd = 'cd {} && {} cpp_extension_setup.py install'.format( - cur_dir, sys.executable - ) + cmd = f'cd {cur_dir} && {sys.executable} cpp_extension_setup.py install' run_cmd(cmd) site_dir = site.getsitepackages()[0] diff --git a/test/cpp_extension/test_mixed_extension_setup.py b/test/cpp_extension/test_mixed_extension_setup.py index 92aceff5067af..26c9dcbed81f7 100644 --- a/test/cpp_extension/test_mixed_extension_setup.py +++ b/test/cpp_extension/test_mixed_extension_setup.py @@ -169,9 +169,7 @@ def _test_static(self): 
np.testing.assert_array_equal( out, pd_out, - err_msg='custom op out: {},\n paddle api out: {}'.format( - out, pd_out - ), + err_msg=f'custom op out: {out},\n paddle api out: {pd_out}', ) def _test_dynamic(self): @@ -188,16 +186,12 @@ def _test_dynamic(self): np.testing.assert_array_equal( out, pd_out, - err_msg='custom op out: {},\n paddle api out: {}'.format( - out, pd_out - ), + err_msg=f'custom op out: {out},\n paddle api out: {pd_out}', ) np.testing.assert_array_equal( x_grad, pd_x_grad, - err_msg='custom op x grad: {},\n paddle api x grad: {}'.format( - x_grad, pd_x_grad - ), + err_msg=f'custom op x grad: {x_grad},\n paddle api x grad: {pd_x_grad}', ) def _test_double_grad_dynamic(self): @@ -214,9 +208,7 @@ def _test_double_grad_dynamic(self): np.testing.assert_array_equal( out, pd_out, - err_msg='custom op out: {},\n paddle api out: {}'.format( - out, pd_out - ), + err_msg=f'custom op out: {out},\n paddle api out: {pd_out}', ) np.testing.assert_array_equal( dx_grad, diff --git a/test/cpp_extension/utils.py b/test/cpp_extension/utils.py index 19659c6d5d716..be19ccb518f4a 100644 --- a/test/cpp_extension/utils.py +++ b/test/cpp_extension/utils.py @@ -59,9 +59,7 @@ def check_output(out, pd_out, name): np.testing.assert_array_equal( out, pd_out, - err_msg='custom op {}: {},\n paddle api {}: {}'.format( - name, out, name, pd_out - ), + err_msg=f'custom op {name}: {out},\n paddle api {name}: {pd_out}', ) @@ -75,7 +73,5 @@ def check_output_allclose(out, pd_out, name, rtol=5e-5, atol=1e-2): pd_out, rtol, atol, - err_msg='custom op {}: {},\n paddle api {}: {}'.format( - name, out, name, pd_out - ), + err_msg=f'custom op {name}: {out},\n paddle api {name}: {pd_out}', ) diff --git a/test/custom_kernel/test_custom_kernel_dot.py b/test/custom_kernel/test_custom_kernel_dot.py index 44aa6aec33c05..7059af7f49e3c 100644 --- a/test/custom_kernel/test_custom_kernel_dot.py +++ b/test/custom_kernel/test_custom_kernel_dot.py @@ -49,9 +49,7 @@ def test_custom_kernel_dot_run(self): np.testing.assert_array_equal( out.numpy(), result, - err_msg='custom kernel dot out: {},\n numpy dot out: {}'.format( - out.numpy(), result - ), + err_msg=f'custom kernel dot out: {out.numpy()},\n numpy dot out: {result}', ) @@ -82,9 +80,7 @@ def test_custom_kernel_dot_run(self): np.testing.assert_array_equal( out.numpy(), result, - err_msg='custom kernel dot out: {},\n numpy dot out: {}'.format( - out.numpy(), result - ), + err_msg=f'custom kernel dot out: {out.numpy()},\n numpy dot out: {result}', ) diff --git a/test/custom_kernel/test_custom_kernel_load.py b/test/custom_kernel/test_custom_kernel_load.py index f790ae9e3ed9d..a480567c5edcb 100644 --- a/test/custom_kernel/test_custom_kernel_load.py +++ b/test/custom_kernel/test_custom_kernel_load.py @@ -54,9 +54,7 @@ def setUp(self): [paddle_lib_path, '..', '..', 'paddle_custom_device'] ) # copy so to default path - cmd = 'mkdir -p {} && cp ./*.so {}'.format( - self.default_path, self.default_path - ) + cmd = f'mkdir -p {self.default_path} && cp ./*.so {self.default_path}' os.system(cmd) # wait def test_custom_kernel_dot_load(self): @@ -75,9 +73,7 @@ def test_custom_kernel_dot_load(self): np.testing.assert_array_equal( out.numpy(), result, - err_msg='custom kernel dot out: {},\n numpy dot out: {}'.format( - out.numpy(), result - ), + err_msg=f'custom kernel dot out: {out.numpy()},\n numpy dot out: {result}', ) def tearDown(self): diff --git a/test/custom_op/test_context_pool.py b/test/custom_op/test_context_pool.py index b8ccebc1106b4..19ac0ed49a4d3 100644 --- 
a/test/custom_op/test_context_pool.py +++ b/test/custom_op/test_context_pool.py @@ -24,9 +24,7 @@ # Because Windows don't use docker, the shared lib already exists in the # cache dir, it will not be compiled again unless the shared lib is removed. -file = '{}\\context_pool_jit\\context_pool_jit.pyd'.format( - get_build_directory() -) +file = f'{get_build_directory()}\\context_pool_jit\\context_pool_jit.pyd' if os.name == 'nt' and os.path.isfile(file): cmd = f'del {file}' run_cmd(cmd, True) diff --git a/test/custom_op/test_custom_attrs_jit.py b/test/custom_op/test_custom_attrs_jit.py index 676e81c49be27..25632b8e765d0 100644 --- a/test/custom_op/test_custom_attrs_jit.py +++ b/test/custom_op/test_custom_attrs_jit.py @@ -24,9 +24,7 @@ # Because Windows don't use docker, the shared lib already exists in the # cache dir, it will not be compiled again unless the shared lib is removed. -file = '{}\\custom_attrs_jit\\custom_attrs_jit.pyd'.format( - get_build_directory() -) +file = f'{get_build_directory()}\\custom_attrs_jit\\custom_attrs_jit.pyd' if os.name == 'nt' and os.path.isfile(file): cmd = f'del {file}' run_cmd(cmd, True) diff --git a/test/custom_op/test_custom_cast_op_jit.py b/test/custom_op/test_custom_cast_op_jit.py index 24c344c8ad985..8e8fe12203044 100644 --- a/test/custom_op/test_custom_cast_op_jit.py +++ b/test/custom_op/test_custom_cast_op_jit.py @@ -30,9 +30,7 @@ # Because Windows don't use docker, the shared lib already exists in the # cache dir, it will not be compiled again unless the shared lib is removed. -file = '{}\\custom_cast_module_jit\\custom_cast_module_jit.pyd'.format( - get_build_directory() -) +file = f'{get_build_directory()}\\custom_cast_module_jit\\custom_cast_module_jit.pyd' if os.name == 'nt' and os.path.isfile(file): cmd = f'del {file}' run_cmd(cmd, True) diff --git a/test/custom_op/test_custom_concat.py b/test/custom_op/test_custom_concat.py index 4fa1bcb618db8..153ca92a46def 100644 --- a/test/custom_op/test_custom_concat.py +++ b/test/custom_op/test_custom_concat.py @@ -116,9 +116,7 @@ def check_output(self, out, pd_out, name): np.testing.assert_array_equal( out, pd_out, - err_msg='custom op {}: {},\n paddle api {}: {}'.format( - name, out, name, pd_out - ), + err_msg=f'custom op {name}: {out},\n paddle api {name}: {pd_out}', ) def test_dynamic(self): diff --git a/test/custom_op/test_custom_relu_op_jit.py b/test/custom_op/test_custom_relu_op_jit.py index ecf9c6dfbc87f..95e4fab22b9c9 100644 --- a/test/custom_op/test_custom_relu_op_jit.py +++ b/test/custom_op/test_custom_relu_op_jit.py @@ -31,9 +31,7 @@ # Because Windows don't use docker, the shared lib already exists in the # cache dir, it will not be compiled again unless the shared lib is removed. 
-file = '{}\\custom_relu_module_jit\\custom_relu_module_jit.pyd'.format( - get_build_directory() -) +file = f'{get_build_directory()}\\custom_relu_module_jit\\custom_relu_module_jit.pyd' if os.name == 'nt' and os.path.isfile(file): cmd = f'del {file}' run_cmd(cmd, True) @@ -86,9 +84,7 @@ def test_static(self): np.testing.assert_array_equal( out, pd_out, - err_msg='custom op out: {},\n paddle api out: {}'.format( - out, pd_out - ), + err_msg=f'custom op out: {out},\n paddle api out: {pd_out}', ) def test_dynamic(self): @@ -107,9 +103,7 @@ def test_dynamic(self): np.testing.assert_array_equal( out, pd_out, - err_msg='custom op out: {},\n paddle api out: {}'.format( - out, pd_out - ), + err_msg=f'custom op out: {out},\n paddle api out: {pd_out}', ) np.testing.assert_array_equal( x_grad, diff --git a/test/custom_op/test_custom_relu_op_setup.py b/test/custom_op/test_custom_relu_op_setup.py index 0887f0268f959..eaa6cdc1a8c34 100644 --- a/test/custom_op/test_custom_relu_op_setup.py +++ b/test/custom_op/test_custom_relu_op_setup.py @@ -147,12 +147,10 @@ def setUp(self): cur_dir = os.path.dirname(os.path.abspath(__file__)) # compile, install the custom op egg into site-packages under background if os.name == 'nt': - cmd = 'cd /d {} && python custom_relu_setup.py install'.format( - cur_dir - ) + cmd = f'cd /d {cur_dir} && python custom_relu_setup.py install' else: - cmd = 'cd {} && {} custom_relu_setup.py install'.format( - cur_dir, sys.executable + cmd = ( + f'cd {cur_dir} && {sys.executable} custom_relu_setup.py install' ) run_cmd(cmd) diff --git a/test/custom_op/test_custom_relu_op_xpu_setup.py b/test/custom_op/test_custom_relu_op_xpu_setup.py index 967b976533ebf..bf14ed6844b1b 100644 --- a/test/custom_op/test_custom_relu_op_xpu_setup.py +++ b/test/custom_op/test_custom_relu_op_xpu_setup.py @@ -64,8 +64,8 @@ def custom_relu_static( class TestNewCustomOpXpuSetUpInstall(unittest.TestCase): def setUp(self): cur_dir = os.path.dirname(os.path.abspath(__file__)) - cmd = 'cd {} && {} custom_relu_xpu_setup.py install'.format( - cur_dir, sys.executable + cmd = ( + f'cd {cur_dir} && {sys.executable} custom_relu_xpu_setup.py install' ) run_cmd(cmd) diff --git a/test/custom_op/test_custom_simple_slice.py b/test/custom_op/test_custom_simple_slice.py index e2662e70f3bc6..166108f4fe63d 100644 --- a/test/custom_op/test_custom_simple_slice.py +++ b/test/custom_op/test_custom_simple_slice.py @@ -24,9 +24,7 @@ # Because Windows don't use docker, the shared lib already exists in the # cache dir, it will not be compiled again unless the shared lib is removed. -file = '{}\\custom_simple_slice\\custom_simple_slice.pyd'.format( - get_build_directory() -) +file = f'{get_build_directory()}\\custom_simple_slice\\custom_simple_slice.pyd' if os.name == 'nt' and os.path.isfile(file): cmd = f'del {file}' run_cmd(cmd, True) diff --git a/test/custom_op/test_custom_tensor_operator.py b/test/custom_op/test_custom_tensor_operator.py index f6edbd934171d..8460bd2dba95a 100644 --- a/test/custom_op/test_custom_tensor_operator.py +++ b/test/custom_op/test_custom_tensor_operator.py @@ -30,9 +30,7 @@ # Because Windows don't use docker, the shared lib already exists in the # cache dir, it will not be compiled again unless the shared lib is removed. 
-file = '{}\\custom_tensor_operator\\custom_tensor_operator.pyd'.format( - get_build_directory() -) +file = f'{get_build_directory()}\\custom_tensor_operator\\custom_tensor_operator.pyd' if os.name == 'nt' and os.path.isfile(file): cmd = f'del {file}' run_cmd(cmd, True) diff --git a/test/custom_op/utils.py b/test/custom_op/utils.py index d65a0f2175f6e..c6928a0024bb8 100644 --- a/test/custom_op/utils.py +++ b/test/custom_op/utils.py @@ -61,9 +61,7 @@ def check_output(out, pd_out, name): np.testing.assert_array_equal( out, pd_out, - err_msg='custom op {}: {},\n paddle api {}: {}'.format( - name, out, name, pd_out - ), + err_msg=f'custom op {name}: {out},\n paddle api {name}: {pd_out}', ) @@ -77,7 +75,5 @@ def check_output_allclose(out, pd_out, name, rtol=5e-5, atol=1e-2): pd_out, rtol, atol, - err_msg='custom op {}: {},\n paddle api {}: {}'.format( - name, out, name, pd_out - ), + err_msg=f'custom op {name}: {out},\n paddle api {name}: {pd_out}', ) diff --git a/test/custom_runtime/test_collective_process_group_xccl.py b/test/custom_runtime/test_collective_process_group_xccl.py index 8b80b3b361f1f..4d007d726273b 100644 --- a/test/custom_runtime/test_collective_process_group_xccl.py +++ b/test/custom_runtime/test_collective_process_group_xccl.py @@ -167,9 +167,7 @@ def setUp(self): # only valid in current process os.environ['CUSTOM_DEVICE_ROOT'] = os.path.join( cur_dir, - '{}/PaddleCustomDevice/backends/custom_cpu/build'.format( - self.temp_dir.name - ), + f'{self.temp_dir.name}/PaddleCustomDevice/backends/custom_cpu/build', ) os.environ['FLAGS_selected_custom_cpus'] = '0,1' os.environ['CUSTOM_CPU_VISIBLE_DEVICES'] = '0,1' diff --git a/test/custom_runtime/test_custom_cpu_plugin.py b/test/custom_runtime/test_custom_cpu_plugin.py index 5dd375514057e..b92df8def9dd3 100755 --- a/test/custom_runtime/test_custom_cpu_plugin.py +++ b/test/custom_runtime/test_custom_cpu_plugin.py @@ -43,9 +43,7 @@ def setUp(self): # only valid in current process os.environ['CUSTOM_DEVICE_ROOT'] = os.path.join( cur_dir, - '{}/PaddleCustomDevice/backends/custom_cpu/build'.format( - self.temp_dir.name - ), + f'{self.temp_dir.name}/PaddleCustomDevice/backends/custom_cpu/build', ) def tearDown(self): diff --git a/test/custom_runtime/test_custom_cpu_profiler_plugin.py b/test/custom_runtime/test_custom_cpu_profiler_plugin.py index 2bb9e278cfb76..220c9a0a21aeb 100644 --- a/test/custom_runtime/test_custom_cpu_profiler_plugin.py +++ b/test/custom_runtime/test_custom_cpu_profiler_plugin.py @@ -41,9 +41,7 @@ def setUp(self): # only valid in current process os.environ['CUSTOM_DEVICE_ROOT'] = os.path.join( cur_dir, - '{}/PaddleCustomDevice/backends/custom_cpu/build'.format( - self.temp_dir.name - ), + f'{self.temp_dir.name}/PaddleCustomDevice/backends/custom_cpu/build', ) def tearDown(self): diff --git a/test/custom_runtime/test_custom_cpu_to_static.py b/test/custom_runtime/test_custom_cpu_to_static.py index c546382eb2385..60ba27004afbd 100644 --- a/test/custom_runtime/test_custom_cpu_to_static.py +++ b/test/custom_runtime/test_custom_cpu_to_static.py @@ -123,9 +123,7 @@ def setUp(self): # only valid in current process os.environ['CUSTOM_DEVICE_ROOT'] = os.path.join( cur_dir, - '{}/PaddleCustomDevice/backends/custom_cpu/build'.format( - self.temp_dir.name - ), + f'{self.temp_dir.name}/PaddleCustomDevice/backends/custom_cpu/build', ) def tearDown(self): diff --git a/test/custom_runtime/test_custom_op_setup.py b/test/custom_runtime/test_custom_op_setup.py index f0f5d1cb505a6..47c7d9821d6b8 100644 --- 
a/test/custom_runtime/test_custom_op_setup.py +++ b/test/custom_runtime/test_custom_op_setup.py @@ -123,9 +123,7 @@ def setUp(self): # only valid in current process os.environ['CUSTOM_DEVICE_ROOT'] = os.path.join( self.cur_dir, - '{}/PaddleCustomDevice/backends/custom_cpu/build'.format( - self.temp_dir.name - ), + f'{self.temp_dir.name}/PaddleCustomDevice/backends/custom_cpu/build', ) # `import paddle` loads custom_cpu.so, hence we must import paddle after finishing build PaddleCustomDevice @@ -185,9 +183,7 @@ def _test_static(self): np.testing.assert_array_equal( out, pd_out, - err_msg="custom op out: {},\n paddle api out: {}".format( - out, pd_out - ), + err_msg=f"custom op out: {out},\n paddle api out: {pd_out}", ) def _test_dynamic(self): @@ -202,16 +198,12 @@ def _test_dynamic(self): np.testing.assert_array_equal( out, pd_out, - err_msg="custom op out: {},\n paddle api out: {}".format( - out, pd_out - ), + err_msg=f"custom op out: {out},\n paddle api out: {pd_out}", ) np.testing.assert_array_equal( x_grad, pd_x_grad, - err_msg="custom op x grad: {},\n paddle api x grad: {}".format( - x_grad, pd_x_grad - ), + err_msg=f"custom op x grad: {x_grad},\n paddle api x grad: {pd_x_grad}", ) def _test_double_grad_dynamic(self): @@ -226,9 +218,7 @@ def _test_double_grad_dynamic(self): np.testing.assert_array_equal( out, pd_out, - err_msg="custom op out: {},\n paddle api out: {}".format( - out, pd_out - ), + err_msg=f"custom op out: {out},\n paddle api out: {pd_out}", ) np.testing.assert_array_equal( dx_grad, @@ -264,9 +254,7 @@ def _test_with_dataloader(self): np.testing.assert_array_equal( out, pd_out, - err_msg="custom op out: {},\n paddle api out: {}".format( - out, pd_out - ), + err_msg=f"custom op out: {out},\n paddle api out: {pd_out}", ) if batch_id == 5: diff --git a/test/distributed_passes/dist_pass_test_base.py b/test/distributed_passes/dist_pass_test_base.py index dc8da03bd6a4f..72bc7ca78d9de 100644 --- a/test/distributed_passes/dist_pass_test_base.py +++ b/test/distributed_passes/dist_pass_test_base.py @@ -287,9 +287,7 @@ def apply_passes(self, main_prog, startup_prog): self.assertEqual( len(passes), len(new_passes), - "After solving conflicts, the left passes are: {}".format( - auto_pass_manager.names - ), + f"After solving conflicts, the left passes are: {auto_pass_manager.names}", ) for i, (p1, p2) in enumerate(zip(passes, new_passes)): diff --git a/test/dygraph_to_static/test_break_continue.py b/test/dygraph_to_static/test_break_continue.py index 6c6547d579577..d3a2162dc787e 100644 --- a/test/dygraph_to_static/test_break_continue.py +++ b/test/dygraph_to_static/test_break_continue.py @@ -235,9 +235,7 @@ def test_transformed_static_result(self): dygraph_res, static_res, rtol=1e-05, - err_msg='dygraph res is {}\nstatic_res is {}'.format( - dygraph_res, static_res - ), + err_msg=f'dygraph res is {dygraph_res}\nstatic_res is {static_res}', ) diff --git a/test/dygraph_to_static/test_build_strategy.py b/test/dygraph_to_static/test_build_strategy.py index 92968fabf28f5..83ed8d56751dd 100644 --- a/test/dygraph_to_static/test_build_strategy.py +++ b/test/dygraph_to_static/test_build_strategy.py @@ -53,17 +53,13 @@ def verify_predict(self): dy_jit_pre, st_pre, rtol=1e-05, - err_msg='dy_jit_pre:\n {}\n, st_pre: \n{}.'.format( - dy_jit_pre, st_pre - ), + err_msg=f'dy_jit_pre:\n {dy_jit_pre}\n, st_pre: \n{st_pre}.', ) np.testing.assert_allclose( predictor_pre, st_pre, rtol=1e-05, - err_msg='predictor_pre:\n {}\n, st_pre: \n{}.'.format( - predictor_pre, st_pre - ), + err_msg=f'predictor_pre:\n 
{predictor_pre}\n, st_pre: \n{st_pre}.', ) @ast_only_test @@ -74,9 +70,7 @@ def test_resnet(self): static_loss, dygraph_loss, rtol=1e-05, - err_msg='static_loss: {} \n dygraph_loss: {}'.format( - static_loss, dygraph_loss - ), + err_msg=f'static_loss: {static_loss} \n dygraph_loss: {dygraph_loss}', ) self.verify_predict() diff --git a/test/dygraph_to_static/test_cache_program.py b/test/dygraph_to_static/test_cache_program.py index 2b8a88245de87..0602b15b3054b 100644 --- a/test/dygraph_to_static/test_cache_program.py +++ b/test/dygraph_to_static/test_cache_program.py @@ -115,9 +115,7 @@ def test_with_optimizer(self): dygraph_loss, static_loss, rtol=1e-05, - err_msg='dygraph is {}\n static_res is \n{}'.format( - dygraph_loss, static_loss - ), + err_msg=f'dygraph is {dygraph_loss}\n static_res is \n{static_loss}', ) diff --git a/test/dygraph_to_static/test_cast.py b/test/dygraph_to_static/test_cast.py index 156d25d747137..7e2b0914a5fff 100644 --- a/test/dygraph_to_static/test_cast.py +++ b/test/dygraph_to_static/test_cast.py @@ -103,9 +103,7 @@ def test_cast_result(self): res, ref_val, rtol=1e-05, - err_msg='The casted value is {}.\nThe correct value is {}.'.format( - res, ref_val - ), + err_msg=f'The casted value is {res}.\nThe correct value is {ref_val}.', ) @@ -176,9 +174,7 @@ def test_cast_result(self): res, ref_val, rtol=1e-05, - err_msg='The casted value is {}.\nThe correct value is {}.'.format( - res, ref_val - ), + err_msg=f'The casted value is {res}.\nThe correct value is {ref_val}.', ) @@ -198,9 +194,7 @@ def test_cast_result(self): ref_val = int(self.input) self.assertTrue( res == ref_val, - msg='The casted value is {}.\nThe correct value is {}.'.format( - res, ref_val - ), + msg=f'The casted value is {res}.\nThe correct value is {ref_val}.', ) diff --git a/test/dygraph_to_static/test_container.py b/test/dygraph_to_static/test_container.py index 95dc0214e9786..412362ba725c5 100644 --- a/test/dygraph_to_static/test_container.py +++ b/test/dygraph_to_static/test_container.py @@ -114,9 +114,7 @@ def test_train(self): dy_out, st_out, rtol=1e-05, - err_msg='dygraph_res is {}\nstatic_res is {}'.format( - dy_out, st_out - ), + err_msg=f'dygraph_res is {dy_out}\nstatic_res is {st_out}', ) def _test_load(self, net, x): diff --git a/test/dygraph_to_static/test_convert_call.py b/test/dygraph_to_static/test_convert_call.py index 79f23351cb6dd..fb6c69fc899fa 100644 --- a/test/dygraph_to_static/test_convert_call.py +++ b/test/dygraph_to_static/test_convert_call.py @@ -109,9 +109,7 @@ def test_transformed_static_result(self): dygraph_res, static_res, rtol=1e-05, - err_msg='dygraph res is {}\nstatic_res is {}'.format( - dygraph_res, static_res - ), + err_msg=f'dygraph res is {dygraph_res}\nstatic_res is {static_res}', ) diff --git a/test/dygraph_to_static/test_dict.py b/test/dygraph_to_static/test_dict.py index ac92feef1d140..80180b522cf54 100644 --- a/test/dygraph_to_static/test_dict.py +++ b/test/dygraph_to_static/test_dict.py @@ -203,9 +203,7 @@ def test_transformed_result(self): dygraph_res, static_res, rtol=1e-05, - err_msg='dygraph result is {}\nstatic result is {}'.format( - dygraph_res, static_res - ), + err_msg=f'dygraph result is {dygraph_res}\nstatic result is {static_res}', ) @@ -247,9 +245,7 @@ def test_ast_to_func(self): self.assertTrue( (dygraph_result == static_result).all(), - msg="dygraph result: {}\nstatic result: {}".format( - dygraph_result, static_result - ), + msg=f"dygraph result: {dygraph_result}\nstatic result: {static_result}", ) diff --git 
a/test/dygraph_to_static/test_error.py b/test/dygraph_to_static/test_error.py index 762859d2d38f1..8c6f74d75c4e0 100644 --- a/test/dygraph_to_static/test_error.py +++ b/test/dygraph_to_static/test_error.py @@ -257,9 +257,7 @@ def set_exception_type(self): def set_message(self): self.expected_message = [ - 'File "{}", line 37, in func_error_in_compile_time'.format( - self.filepath - ), + f'File "{self.filepath}", line 37, in func_error_in_compile_time', 'inner_func()', f'File "{self.filepath}", line 30, in inner_func', 'def inner_func():', @@ -288,9 +286,7 @@ def set_exception_type(self): def set_message(self): self.expected_message = [ - 'File "{}", line 48, in func_error_in_compile_time_2'.format( - self.filepath - ), + f'File "{self.filepath}", line 48, in func_error_in_compile_time_2', 'def func_error_in_compile_time_2(x):', 'x = base.dygraph.to_variable(x)', 'x = paddle.reshape(x, shape=[1, 2])', @@ -338,9 +334,7 @@ def set_exception_type(self): def set_message(self): self.expected_message = [ - 'File "{}", line 56, in func_error_in_runtime'.format( - self.filepath - ), + f'File "{self.filepath}", line 56, in func_error_in_runtime', 'x = base.dygraph.to_variable(x)', 'two = paddle.tensor.fill_constant(shape=[1], value=2, dtype="int32")', 'x = paddle.reshape(x, shape=[1, two])', diff --git a/test/dygraph_to_static/test_fetch_feed.py b/test/dygraph_to_static/test_fetch_feed.py index 5ddc1f3da24ee..0834f2ec4a315 100644 --- a/test/dygraph_to_static/test_fetch_feed.py +++ b/test/dygraph_to_static/test_fetch_feed.py @@ -94,9 +94,7 @@ def test_declarative(self): dygraph_res, static_res, rtol=1e-05, - err_msg='dygraph_res is {}\n static_res is \n{}'.format( - dygraph_res, static_res - ), + err_msg=f'dygraph_res is {dygraph_res}\n static_res is \n{static_res}', ) diff --git a/test/dygraph_to_static/test_lac.py b/test/dygraph_to_static/test_lac.py index a650a25c25b07..522eb81cf5a7a 100644 --- a/test/dygraph_to_static/test_lac.py +++ b/test/dygraph_to_static/test_lac.py @@ -624,9 +624,7 @@ def test_train(self): dy_out, st_out, rtol=1e-05, - err_msg='dygraph output:\n{},\nstatic output:\n {}.'.format( - dy_out, st_out - ), + err_msg=f'dygraph output:\n{dy_out},\nstatic output:\n {st_out}.', ) # Prediction needs trained models, so put `test_predict` at last of `test_train` # self.verify_predict() diff --git a/test/dygraph_to_static/test_layer_hook.py b/test/dygraph_to_static/test_layer_hook.py index 4d3fb03229859..bf679cf8dcc2e 100644 --- a/test/dygraph_to_static/test_layer_hook.py +++ b/test/dygraph_to_static/test_layer_hook.py @@ -93,9 +93,7 @@ def test_hook(self): st_out, dy_out, rtol=1e-05, - err_msg='dygraph_res is {}\nstatic_res is {}'.format( - dy_out, st_out - ), + err_msg=f'dygraph_res is {dy_out}\nstatic_res is {st_out}', ) np.testing.assert_allclose( st_out, diff --git a/test/dygraph_to_static/test_list.py b/test/dygraph_to_static/test_list.py index b0febb2b0c9ee..9ad646de8818c 100644 --- a/test/dygraph_to_static/test_list.py +++ b/test/dygraph_to_static/test_list.py @@ -261,9 +261,7 @@ def test_transformed_static_result(self): stat_res, dy_res, rtol=1e-05, - err_msg='dygraph_res is {}\nstatic_res is {}'.format( - dy_res, stat_res - ), + err_msg=f'dygraph_res is {dy_res}\nstatic_res is {stat_res}', ) diff --git a/test/dygraph_to_static/test_logical.py b/test/dygraph_to_static/test_logical.py index 3b00903bc478c..9e0f1d12bd9b4 100644 --- a/test/dygraph_to_static/test_logical.py +++ b/test/dygraph_to_static/test_logical.py @@ -206,9 +206,7 @@ def test_transformed_result(self): dygraph_res, 
static_res, rtol=1e-05, - err_msg='dygraph result is {}\nstatic_result is {}'.format( - dygraph_res, static_res - ), + err_msg=f'dygraph result is {dygraph_res}\nstatic_result is {static_res}', ) @@ -223,9 +221,7 @@ def test_transformed_result(self): dygraph_res, static_res, rtol=1e-05, - err_msg='dygraph result is {}\nstatic_result is {}'.format( - dygraph_res, static_res - ), + err_msg=f'dygraph result is {dygraph_res}\nstatic_result is {static_res}', ) diff --git a/test/dygraph_to_static/test_lstm.py b/test/dygraph_to_static/test_lstm.py index 4dc5b5a0fba75..2e26a37705631 100644 --- a/test/dygraph_to_static/test_lstm.py +++ b/test/dygraph_to_static/test_lstm.py @@ -103,9 +103,7 @@ def test_save_in_eval(self, with_training=True): dygraph_out.numpy(), static_out.numpy(), rtol=1e-05, - err_msg='dygraph_out is {}\n static_out is \n{}'.format( - dygraph_out, static_out - ), + err_msg=f'dygraph_out is {dygraph_out}\n static_out is \n{static_out}', ) # switch back into train mode. net.train() @@ -114,9 +112,7 @@ def test_save_in_eval(self, with_training=True): dygraph_out.numpy(), train_out.numpy(), rtol=1e-05, - err_msg='dygraph_out is {}\n static_out is \n{}'.format( - dygraph_out, train_out - ), + err_msg=f'dygraph_out is {dygraph_out}\n static_out is \n{train_out}', ) def test_save_without_training(self): @@ -176,9 +172,7 @@ def test_save_in_eval(self): eval_out.numpy(), infer_out.numpy(), rtol=1e-05, - err_msg='eval_out is {}\n infer_out is \n{}'.format( - eval_out, infer_out - ), + err_msg=f'eval_out is {eval_out}\n infer_out is \n{infer_out}', ) diff --git a/test/dygraph_to_static/test_mnist.py b/test/dygraph_to_static/test_mnist.py index d8f22e8bd1b17..9641a9225cee7 100644 --- a/test/dygraph_to_static/test_mnist.py +++ b/test/dygraph_to_static/test_mnist.py @@ -168,9 +168,7 @@ def test_mnist_to_static(self): dygraph_loss, static_loss, rtol=1e-05, - err_msg='dygraph is {}\n static_res is \n{}'.format( - dygraph_loss, static_loss - ), + err_msg=f'dygraph is {dygraph_loss}\n static_res is \n{static_loss}', ) def test_mnist_declarative_cpu_vs_mkldnn(self): diff --git a/test/dygraph_to_static/test_mnist_amp.py b/test/dygraph_to_static/test_mnist_amp.py index e5e11062aad7b..3e4b9d1b11657 100644 --- a/test/dygraph_to_static/test_mnist_amp.py +++ b/test/dygraph_to_static/test_mnist_amp.py @@ -45,9 +45,7 @@ def test_mnist_to_static(self): static_loss, rtol=1e-05, atol=0.001, - err_msg='dygraph is {}\n static_res is \n{}'.format( - dygraph_loss, static_loss - ), + err_msg=f'dygraph is {dygraph_loss}\n static_res is \n{static_loss}', ) def train(self, to_static=False): diff --git a/test/dygraph_to_static/test_mnist_pure_fp16.py b/test/dygraph_to_static/test_mnist_pure_fp16.py index 9d5ae58edbbd7..c1489cc6e9158 100644 --- a/test/dygraph_to_static/test_mnist_pure_fp16.py +++ b/test/dygraph_to_static/test_mnist_pure_fp16.py @@ -43,9 +43,7 @@ def test_mnist_to_static(self): static_loss, rtol=1e-05, atol=0.001, - err_msg='dygraph is {}\n static_res is \n{}'.format( - dygraph_loss, static_loss - ), + err_msg=f'dygraph is {dygraph_loss}\n static_res is \n{static_loss}', ) def train(self, to_static=False): diff --git a/test/dygraph_to_static/test_mobile_net.py b/test/dygraph_to_static/test_mobile_net.py index 607d7644763de..5536a14e695c4 100644 --- a/test/dygraph_to_static/test_mobile_net.py +++ b/test/dygraph_to_static/test_mobile_net.py @@ -716,18 +716,14 @@ def assert_same_predict(self, model_name): dy_jit_pre, st_pre, rtol=1e-05, - err_msg='dy_jit_pre:\n {}\n, st_pre: \n{}.'.format( - dy_jit_pre, 
st_pre - ), + err_msg=f'dy_jit_pre:\n {dy_jit_pre}\n, st_pre: \n{st_pre}.', ) np.testing.assert_allclose( predictor_pre, st_pre, rtol=1e-05, atol=1e-05, - err_msg='inference_pred_res:\n {}\n, st_pre: \n{}.'.format( - predictor_pre, st_pre - ), + err_msg=f'inference_pred_res:\n {predictor_pre}\n, st_pre: \n{st_pre}.', ) @test_with_new_ir diff --git a/test/dygraph_to_static/test_pylayer.py b/test/dygraph_to_static/test_pylayer.py index 8191760c72a3f..88558e3d628fb 100644 --- a/test/dygraph_to_static/test_pylayer.py +++ b/test/dygraph_to_static/test_pylayer.py @@ -32,9 +32,7 @@ def compare_result(dygraph_res, static_res, rtol=1e-5, atol=0): static_res.detach().numpy(), rtol=rtol, atol=atol, - err_msg='dygraph result is {}\nstatic_result is {}'.format( - dygraph_res, static_res - ), + err_msg=f'dygraph result is {dygraph_res}\nstatic_result is {static_res}', ) diff --git a/test/dygraph_to_static/test_resnet.py b/test/dygraph_to_static/test_resnet.py index 3d4585117c977..a99999c4e7447 100644 --- a/test/dygraph_to_static/test_resnet.py +++ b/test/dygraph_to_static/test_resnet.py @@ -143,9 +143,7 @@ def __init__(self, layers=50, class_dim=102): supported_layers = [50, 101, 152] assert ( layers in supported_layers - ), "supported layers are {} but input layer is {}".format( - supported_layers, layers - ) + ), f"supported layers are {supported_layers} but input layer is {layers}" if layers == 50: depth = [3, 4, 6, 3] @@ -412,17 +410,13 @@ def verify_predict(self): dy_jit_pre, st_pre, rtol=1e-05, - err_msg='dy_jit_pre:\n {}\n, st_pre: \n{}.'.format( - dy_jit_pre, st_pre - ), + err_msg=f'dy_jit_pre:\n {dy_jit_pre}\n, st_pre: \n{st_pre}.', ) np.testing.assert_allclose( predictor_pre, st_pre, rtol=1e-05, - err_msg='predictor_pre:\n {}\n, st_pre: \n{}.'.format( - predictor_pre, st_pre - ), + err_msg=f'predictor_pre:\n {predictor_pre}\n, st_pre: \n{st_pre}.', ) @test_with_new_ir @@ -433,9 +427,7 @@ def test_resnet_new_ir(self): static_loss, dygraph_loss, rtol=1e-05, - err_msg='static_loss: {} \n dygraph_loss: {}'.format( - static_loss, dygraph_loss - ), + err_msg=f'static_loss: {static_loss} \n dygraph_loss: {dygraph_loss}', ) def test_resnet(self): @@ -445,9 +437,7 @@ def test_resnet(self): static_loss, dygraph_loss, rtol=1e-05, - err_msg='static_loss: {} \n dygraph_loss: {}'.format( - static_loss, dygraph_loss - ), + err_msg=f'static_loss: {static_loss} \n dygraph_loss: {dygraph_loss}', ) self.verify_predict() @@ -460,9 +450,7 @@ def test_resnet_composite_forward_backward(self): static_loss, dygraph_loss, rtol=1e-02, - err_msg='static_loss: {} \n dygraph_loss: {}'.format( - static_loss, dygraph_loss - ), + err_msg=f'static_loss: {static_loss} \n dygraph_loss: {dygraph_loss}', ) def test_in_static_mode_mkldnn(self): diff --git a/test/dygraph_to_static/test_resnet_amp.py b/test/dygraph_to_static/test_resnet_amp.py index 40c60520fbcad..60a30db707be4 100644 --- a/test/dygraph_to_static/test_resnet_amp.py +++ b/test/dygraph_to_static/test_resnet_amp.py @@ -124,9 +124,7 @@ def test_resnet(self): static_loss, dygraph_loss, rtol=1e-05, - err_msg='static_loss: {} \n dygraph_loss: {}'.format( - static_loss, dygraph_loss - ), + err_msg=f'static_loss: {static_loss} \n dygraph_loss: {dygraph_loss}', ) def test_resnet_composite(self): @@ -138,9 +136,7 @@ def test_resnet_composite(self): static_loss, dygraph_loss, rtol=1e-05, - err_msg='static_loss: {} \n dygraph_loss: {}'.format( - static_loss, dygraph_loss - ), + err_msg=f'static_loss: {static_loss} \n dygraph_loss: {dygraph_loss}', ) diff --git 
a/test/dygraph_to_static/test_resnet_pure_fp16.py b/test/dygraph_to_static/test_resnet_pure_fp16.py index c878372fbd406..1eb6a8ac9b3a5 100644 --- a/test/dygraph_to_static/test_resnet_pure_fp16.py +++ b/test/dygraph_to_static/test_resnet_pure_fp16.py @@ -132,9 +132,7 @@ def test_resnet(self): dygraph_loss, rtol=1e-05, atol=0.001, - err_msg='static_loss: {} \n dygraph_loss: {}'.format( - static_loss, dygraph_loss - ), + err_msg=f'static_loss: {static_loss} \n dygraph_loss: {dygraph_loss}', ) def test_resnet_composite(self): @@ -149,9 +147,7 @@ def test_resnet_composite(self): dygraph_loss, rtol=1e-05, atol=0.001, - err_msg='static_loss: {} \n dygraph_loss: {}'.format( - static_loss, dygraph_loss - ), + err_msg=f'static_loss: {static_loss} \n dygraph_loss: {dygraph_loss}', ) diff --git a/test/dygraph_to_static/test_resnet_v2.py b/test/dygraph_to_static/test_resnet_v2.py index 2b4e9676c5f36..cf941effd2c28 100644 --- a/test/dygraph_to_static/test_resnet_v2.py +++ b/test/dygraph_to_static/test_resnet_v2.py @@ -148,9 +148,7 @@ def __init__(self, layers=50, class_dim=102): supported_layers = [50, 101, 152] assert ( layers in supported_layers - ), "supported layers are {} but input layer is {}".format( - supported_layers, layers - ) + ), f"supported layers are {supported_layers} but input layer is {layers}" if layers == 50: depth = [3, 4, 6, 3] @@ -419,17 +417,13 @@ def verify_predict(self): dy_jit_pre, st_pre, rtol=1e-05, - err_msg='dy_jit_pre:\n {}\n, st_pre: \n{}.'.format( - dy_jit_pre, st_pre - ), + err_msg=f'dy_jit_pre:\n {dy_jit_pre}\n, st_pre: \n{st_pre}.', ) np.testing.assert_allclose( predictor_pre, st_pre, rtol=1e-05, - err_msg='predictor_pre:\n {}\n, st_pre: \n{}.'.format( - predictor_pre, st_pre - ), + err_msg=f'predictor_pre:\n {predictor_pre}\n, st_pre: \n{st_pre}.', ) @test_with_new_ir @@ -440,9 +434,7 @@ def test_resnet_new_ir(self): static_loss, dygraph_loss, rtol=1e-05, - err_msg='static_loss: {} \n dygraph_loss: {}'.format( - static_loss, dygraph_loss - ), + err_msg=f'static_loss: {static_loss} \n dygraph_loss: {dygraph_loss}', ) def test_resnet(self): @@ -452,9 +444,7 @@ def test_resnet(self): static_loss, dygraph_loss, rtol=1e-05, - err_msg='static_loss: {} \n dygraph_loss: {}'.format( - static_loss, dygraph_loss - ), + err_msg=f'static_loss: {static_loss} \n dygraph_loss: {dygraph_loss}', ) self.verify_predict() @@ -468,9 +458,7 @@ def test_resnet_composite(self): static_loss, dygraph_loss, rtol=1e-05, - err_msg='static_loss: {} \n dygraph_loss: {}'.format( - static_loss, dygraph_loss - ), + err_msg=f'static_loss: {static_loss} \n dygraph_loss: {dygraph_loss}', ) def test_in_static_mode_mkldnn(self): diff --git a/test/dygraph_to_static/test_se_resnet.py b/test/dygraph_to_static/test_se_resnet.py index aef9b3a2f0b6a..c12990b53659d 100644 --- a/test/dygraph_to_static/test_se_resnet.py +++ b/test/dygraph_to_static/test_se_resnet.py @@ -224,9 +224,7 @@ def __init__(self, layers=50, class_dim=102): supported_layers = [50, 101, 152] assert ( layers in supported_layers - ), "supported layers are {} but input layer is {}".format( - supported_layers, layers - ) + ), f"supported layers are {supported_layers} but input layer is {layers}" if layers == 50: cardinality = 32 @@ -542,9 +540,7 @@ def verify_predict(self): dy_jit_pre, st_pre, rtol=1e-05, - err_msg='dy_jit_pre:\n {}\n, st_pre: \n{}.'.format( - dy_jit_pre, st_pre - ), + err_msg=f'dy_jit_pre:\n {dy_jit_pre}\n, st_pre: \n{st_pre}.', ) flat_st_pre = st_pre.flatten() diff --git a/test/dygraph_to_static/test_seq2seq.py 
b/test/dygraph_to_static/test_seq2seq.py index fee69b74bfdfe..85de170c3f06c 100644 --- a/test/dygraph_to_static/test_seq2seq.py +++ b/test/dygraph_to_static/test_seq2seq.py @@ -211,9 +211,7 @@ def _test_train(self, attn_model=False): result = np.allclose(dygraph_loss, static_loss) self.assertTrue( result, - msg="\ndygraph_loss = {} \nstatic_loss = {}".format( - dygraph_loss, static_loss - ), + msg=f"\ndygraph_loss = {dygraph_loss} \nstatic_loss = {static_loss}", ) def _test_predict(self, attn_model=False): @@ -222,9 +220,7 @@ def _test_predict(self, attn_model=False): result = np.allclose(pred_static, pred_dygraph) self.assertTrue( result, - msg="\npred_dygraph = {} \npred_static = {}".format( - pred_dygraph, pred_static - ), + msg=f"\npred_dygraph = {pred_dygraph} \npred_static = {pred_static}", ) def test_base_model(self): diff --git a/test/dygraph_to_static/yolov3.py b/test/dygraph_to_static/yolov3.py index f72ee8bb8f8ff..8712a49b44a99 100644 --- a/test/dygraph_to_static/yolov3.py +++ b/test/dygraph_to_static/yolov3.py @@ -133,9 +133,7 @@ class YoloDetectionBlock(paddle.nn.Layer): def __init__(self, ch_in, channel, is_test=True): super().__init__() - assert channel % 2 == 0, "channel {} cannot be divided by 2".format( - channel - ) + assert channel % 2 == 0, f"channel {channel} cannot be divided by 2" self.conv0 = ConvBNLayer( ch_in=ch_in, diff --git a/test/fft/spectral_op_np.py b/test/fft/spectral_op_np.py index fadc3349213b9..361cd04ddac8c 100644 --- a/test/fft/spectral_op_np.py +++ b/test/fft/spectral_op_np.py @@ -35,9 +35,7 @@ def _get_norm_mode(norm, forward): def _get_inv_norm(n, norm_mode): - assert isinstance(norm_mode, NormMode), "invalid norm_type {}".format( - norm_mode - ) + assert isinstance(norm_mode, NormMode), f"invalid norm_type {norm_mode}" if norm_mode == NormMode.none: return 1.0 if norm_mode == NormMode.by_sqrt_n: diff --git a/test/ir/inference/test_trt_convert_multiclass_nms.py b/test/ir/inference/test_trt_convert_multiclass_nms.py index 0033bf8aa4bdd..578a3f0c74ced 100644 --- a/test/ir/inference/test_trt_convert_multiclass_nms.py +++ b/test/ir/inference/test_trt_convert_multiclass_nms.py @@ -217,9 +217,7 @@ def assert_tensors_near( arr, rtol=rtol, atol=atol, - err_msg='Output has diff, Maximum absolute error: {}'.format( - np.amax(diff) - ), + err_msg=f'Output has diff, Maximum absolute error: {np.amax(diff)}', ) def assert_op_size(self, trt_engine_num, paddle_op_num): diff --git a/test/ir/inference/test_trt_convert_multiclass_nms3.py b/test/ir/inference/test_trt_convert_multiclass_nms3.py index 60f2b0a68a41a..f221e10f5339f 100644 --- a/test/ir/inference/test_trt_convert_multiclass_nms3.py +++ b/test/ir/inference/test_trt_convert_multiclass_nms3.py @@ -226,9 +226,7 @@ def assert_tensors_near( arr, rtol=rtol, atol=atol, - err_msg='Output has diff, Maximum absolute error: {}'.format( - np.amax(diff) - ), + err_msg=f'Output has diff, Maximum absolute error: {np.amax(diff)}', ) def assert_op_size(self, trt_engine_num, paddle_op_num): diff --git a/test/ir/inference/test_trt_pool3d_op.py b/test/ir/inference/test_trt_pool3d_op.py index 66a05775b071d..462d481cd7d66 100644 --- a/test/ir/inference/test_trt_pool3d_op.py +++ b/test/ir/inference/test_trt_pool3d_op.py @@ -147,9 +147,7 @@ def test(self): ): is_dynamic = True if dynamic_shape_options is not None else False with self.subTest( - 'Precision: {}, Serialize: {}, Dynamic: {}'.format( - precision, serialize, is_dynamic - ) + f'Precision: {precision}, Serialize: {serialize}, Dynamic: {is_dynamic}' ): self.precision = 
precision self.serialize = serialize @@ -258,9 +256,7 @@ def test(self): ): is_dynamic = True if dynamic_shape_options is not None else False with self.subTest( - 'Precision: {}, Serialize: {}, Dynamic: {}'.format( - precision, serialize, is_dynamic - ) + f'Precision: {precision}, Serialize: {serialize}, Dynamic: {is_dynamic}' ): self.precision = precision self.serialize = serialize @@ -359,9 +355,7 @@ def test(self): ): is_dynamic = True if dynamic_shape_options is not None else False with self.subTest( - 'Precision: {}, Serialize: {}, Dynamic: {}'.format( - precision, serialize, is_dynamic - ) + f'Precision: {precision}, Serialize: {serialize}, Dynamic: {is_dynamic}' ): self.precision = precision self.serialize = serialize diff --git a/test/ir/inference/test_trt_pool_op.py b/test/ir/inference/test_trt_pool_op.py index 37ffe6452e0f5..0515eef7150fb 100644 --- a/test/ir/inference/test_trt_pool_op.py +++ b/test/ir/inference/test_trt_pool_op.py @@ -132,9 +132,7 @@ def test(self): ): is_dynamic = True if dynamic_shape_options is not None else False with self.subTest( - 'Precision: {}, Serialize: {}, Dynamic: {}'.format( - precision, serialize, is_dynamic - ) + f'Precision: {precision}, Serialize: {serialize}, Dynamic: {is_dynamic}' ): self.precision = precision self.serialize = serialize diff --git a/test/legacy_test/auto_parallel_autoconvert.py b/test/legacy_test/auto_parallel_autoconvert.py index 5e7b501ed623b..6b41ee9bab180 100644 --- a/test/legacy_test/auto_parallel_autoconvert.py +++ b/test/legacy_test/auto_parallel_autoconvert.py @@ -151,14 +151,10 @@ def setUp(self): def tearDown(self): os.remove( - "./model_state_rank{}.pdmodel".format( - str(paddle.distributed.get_rank()) - ) + f"./model_state_rank{str(paddle.distributed.get_rank())}.pdmodel" ) os.remove( - "./dist_attr_rank{}.pdattr".format( - str(paddle.distributed.get_rank()) - ) + f"./dist_attr_rank{str(paddle.distributed.get_rank())}.pdattr" ) def test_mlp_mp2pp(self): @@ -250,14 +246,10 @@ def setUp(self): def tearDown(self): os.remove( - "./model_state_rank{}.pdmodel".format( - str(paddle.distributed.get_rank()) - ) + f"./model_state_rank{str(paddle.distributed.get_rank())}.pdmodel" ) os.remove( - "./dist_attr_rank{}.pdattr".format( - str(paddle.distributed.get_rank()) - ) + f"./dist_attr_rank{str(paddle.distributed.get_rank())}.pdattr" ) def test_mlp_pp2mp(self): diff --git a/test/legacy_test/benchmark.py b/test/legacy_test/benchmark.py index bc3f2ae7810fb..53964eb6a7b19 100644 --- a/test/legacy_test/benchmark.py +++ b/test/legacy_test/benchmark.py @@ -86,9 +86,7 @@ def timeit_output(self, iters=100): elapses.append(self.timeit_output_with_place(place, iters)) for place, elapse in zip(places, elapses): print( - "One pass of ({}_op) at {} cost {}".format( - self.op_type, str(place), elapse - ) + f"One pass of ({self.op_type}_op) at {str(place)} cost {elapse}" ) def timeit_grad_with_place(self, place, iters=100): @@ -110,7 +108,5 @@ def timeit_grad(self, iters=100): elapses.append(self.timeit_grad_with_place(place, iters)) for place, elapse in zip(places, elapses): print( - "One pass of ({}_grad_op) at {} cost {}".format( - self.op_type, str(place), elapse - ) + f"One pass of ({self.op_type}_grad_op) at {str(place)} cost {elapse}" ) diff --git a/test/legacy_test/dist_fleet_ctr.py b/test/legacy_test/dist_fleet_ctr.py index 64c4f69a55654..0d57a7c73e13d 100644 --- a/test/legacy_test/dist_fleet_ctr.py +++ b/test/legacy_test/dist_fleet_ctr.py @@ -200,9 +200,7 @@ def do_distributed_testing(self, fleet): 
fetch_list=[self.avg_cost.name], ) loss_val = np.mean(loss_val) - message = "TEST ---> batch_idx: {} loss: {}\n".format( - batch_idx, loss_val - ) + message = f"TEST ---> batch_idx: {batch_idx} loss: {loss_val}\n" fleet.util.print_on_rank(message, 0) except base.core.EOFException: self.test_reader.reset() @@ -240,9 +238,7 @@ def do_pyreader_training(self, fleet): # np.array(loss_val), mode="sum") # loss_all_trainer = fleet.util.all_gather(float(loss_val)) # loss_val = float(reduce_output) / len(loss_all_trainer) - message = "TRAIN ---> pass: {} loss: {}\n".format( - epoch_id, loss_val - ) + message = f"TRAIN ---> pass: {epoch_id} loss: {loss_val}\n" fleet.util.print_on_rank(message, 0) pass_time = time.time() - pass_start diff --git a/test/legacy_test/dist_fleet_ctr_ps_gpu.py b/test/legacy_test/dist_fleet_ctr_ps_gpu.py index bf109e6a61306..093a93e9a2c85 100644 --- a/test/legacy_test/dist_fleet_ctr_ps_gpu.py +++ b/test/legacy_test/dist_fleet_ctr_ps_gpu.py @@ -80,9 +80,7 @@ def do_pyreader_training(self, fleet): ) loss_all_trainer = fleet.util.all_gather(float(loss_val)) loss_val = float(reduce_output) / len(loss_all_trainer) - message = "TRAIN ---> pass: {} loss: {}\n".format( - epoch_id, loss_val - ) + message = f"TRAIN ---> pass: {epoch_id} loss: {loss_val}\n" fleet.util.print_on_rank(message, 0) pass_time = time.time() - pass_start diff --git a/test/legacy_test/dist_fleet_simnet_bow.py b/test/legacy_test/dist_fleet_simnet_bow.py index ffbe371cc228a..0d6827b7d339e 100644 --- a/test/legacy_test/dist_fleet_simnet_bow.py +++ b/test/legacy_test/dist_fleet_simnet_bow.py @@ -263,9 +263,7 @@ def do_pyreader_training(self, fleet): fetch_list=[self.avg_cost.name], ) loss_val = np.mean(loss_val) - message = "TRAIN ---> pass: {} loss: {}\n".format( - epoch_id, loss_val - ) + message = f"TRAIN ---> pass: {epoch_id} loss: {loss_val}\n" fleet.util.print_on_rank(message, 0) pass_time = time.time() - pass_start diff --git a/test/legacy_test/dist_fleet_sparse_embedding_ctr.py b/test/legacy_test/dist_fleet_sparse_embedding_ctr.py index 120b7e51305d7..e5991b22d8a77 100644 --- a/test/legacy_test/dist_fleet_sparse_embedding_ctr.py +++ b/test/legacy_test/dist_fleet_sparse_embedding_ctr.py @@ -180,11 +180,7 @@ def do_pyreader_training(self, fleet): fetch_list=[self.avg_cost.name], ) loss_val = np.mean(loss_val) - print( - "TRAIN ---> pass: {} loss: {}\n".format( - epoch_id, loss_val - ) - ) + print(f"TRAIN ---> pass: {epoch_id} loss: {loss_val}\n") except base.core.EOFException: self.reader.reset() diff --git a/test/legacy_test/dist_fleet_sync_batch_norm.py b/test/legacy_test/dist_fleet_sync_batch_norm.py index 86e8e921adf10..c01267800893a 100644 --- a/test/legacy_test/dist_fleet_sync_batch_norm.py +++ b/test/legacy_test/dist_fleet_sync_batch_norm.py @@ -94,9 +94,7 @@ def train(args): rank = paddle.distributed.get_rank() filepath = os.path.join( args.data_dir, - 'input_{}_{}_{}_{}.npy'.format( - rank, args.only_forward, str(args.dtype), args.layout - ), + f'input_{rank}_{args.only_forward}_{str(args.dtype)}_{args.layout}.npy', ) data = np.load(filepath) @@ -110,9 +108,7 @@ def train(args): for i in range(0, len(sync_bn_fetches)): file_path = os.path.join( args.data_dir, - 'output_{}_{}_{}_{}.npy'.format( - rank, args.only_forward, str(args.dtype), i - ), + f'output_{rank}_{args.only_forward}_{str(args.dtype)}_{i}.npy', ) np.save(file_path, sync_bn_fetches[i]) diff --git a/test/legacy_test/dist_se_resnext.py b/test/legacy_test/dist_se_resnext.py index 98b6af3af08ee..672cec6075dcd 100644 --- 
a/test/legacy_test/dist_se_resnext.py +++ b/test/legacy_test/dist_se_resnext.py @@ -47,9 +47,7 @@ def net(self, input, class_dim=1000): supported_layers = [50, 101, 152] assert ( layers in supported_layers - ), "supported layers are {} but input layer is {}".format( - supported_layers, layers - ) + ), f"supported layers are {supported_layers} but input layer is {layers}" if layers == 50: cardinality = 32 reduction_ratio = 16 diff --git a/test/legacy_test/fleet_meta_optimizer_base.py b/test/legacy_test/fleet_meta_optimizer_base.py index c0f2e2fc2a32f..11c05d75ce04b 100755 --- a/test/legacy_test/fleet_meta_optimizer_base.py +++ b/test/legacy_test/fleet_meta_optimizer_base.py @@ -41,9 +41,7 @@ def debug_program(self, main_prog, startup_prog): startup_prog_op_types = [op.type for op in startup_prog_ops] print( - "=== debug program and ops in func [{}] ===".format( - inspect.stack()[1].function - ) + f"=== debug program and ops in func [{inspect.stack()[1].function}] ===" ) print(main_prog) print(main_prog_op_types) diff --git a/test/legacy_test/gradient_checker.py b/test/legacy_test/gradient_checker.py index d146c22f08cf3..67e18075e60a0 100644 --- a/test/legacy_test/gradient_checker.py +++ b/test/legacy_test/gradient_checker.py @@ -321,11 +321,9 @@ def fail_test(msg): n = numerical[x_idx][y_idx] if not np.allclose(a, n, rtol, atol): msg = ( - 'Jacobian mismatch for output {} ' - 'with respect to input {} on {},\n' - 'numerical:{}\nanalytical:{}\n'.format( - y[y_idx].name, x[x_idx].name, str(place), n, a - ) + f'Jacobian mismatch for output {y[y_idx].name} ' + f'with respect to input {x[x_idx].name} on {str(place)},\n' + f'numerical:{n}\nanalytical:{a}\n' ) return fail_test(msg) return True diff --git a/test/legacy_test/test_chunk_eval_op.py b/test/legacy_test/test_chunk_eval_op.py index b059c04f8e0f4..b9db50079b4b3 100644 --- a/test/legacy_test/test_chunk_eval_op.py +++ b/test/legacy_test/test_chunk_eval_op.py @@ -25,11 +25,7 @@ def __init__(self, chunk_type, start_idx, end_idx): self.end_idx = end_idx def __str__(self): - return '(Segment: {}, {}, {})'.format( - self.chunk_type, - self.start_idx, - self.end_idx, - ) + return f'(Segment: {self.chunk_type}, {self.start_idx}, {self.end_idx})' __repr__ = __str__ diff --git a/test/legacy_test/test_detach.py b/test/legacy_test/test_detach.py index 5bb336866733a..53c252055bc68 100644 --- a/test/legacy_test/test_detach.py +++ b/test/legacy_test/test_detach.py @@ -214,9 +214,7 @@ def test_backward_error(self): loss = paddle.nn.functional.relu(var_c + var_d) with self.assertRaisesRegex( RuntimeError, - "received tensor_version:{} != wrapper_version_snapshot:{}".format( - 1, 0 - ), + f"received tensor_version:{1} != wrapper_version_snapshot:{0}", ): loss.backward() diff --git a/test/legacy_test/test_dist_base.py b/test/legacy_test/test_dist_base.py index 2e6895b717579..db7d490e3a5af 100755 --- a/test/legacy_test/test_dist_base.py +++ b/test/legacy_test/test_dist_base.py @@ -1135,9 +1135,8 @@ def _run_local( envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '') cmd += " -m coverage run --branch -p" - cmd += " {} --role trainer --update_method local --lr {:f}".format( - model, - self._lr, + cmd += ( + f" {model} --role trainer --update_method local --lr {self._lr:f}" ) if batch_size != DEFAULT_BATCH_SIZE: @@ -1522,9 +1521,7 @@ def _run_cluster_gloo( tr_env["GLOG_vmodule"] = 'gloo_context=4' tr_env["GLOG_v"] = '3' print( - "use_hallreduce:{} tr_cmd:{}, env: {}".format( - self._use_hallreduce, tr_cmd, tr_env - ) + f"use_hallreduce:{self._use_hallreduce} 
tr_cmd:{tr_cmd}, env: {tr_env}" ) path = os.path.join( @@ -1596,9 +1593,7 @@ def _run_cluster_nccl2( ) tr_env.update(envs) print( - "use_hallreduce:{} tr_cmd:{}, env: {}".format( - self._use_hallreduce, tr_cmd, tr_env - ) + f"use_hallreduce:{self._use_hallreduce} tr_cmd:{tr_cmd}, env: {tr_env}" ) path = os.path.join( diff --git a/test/legacy_test/test_dist_fleet_base.py b/test/legacy_test/test_dist_fleet_base.py index ad421c228b0e5..94d6f836750b0 100644 --- a/test/legacy_test/test_dist_fleet_base.py +++ b/test/legacy_test/test_dist_fleet_base.py @@ -423,18 +423,14 @@ def is_listen_failed(logx): def catlog(logx): basename = os.path.basename(logx) print( - "\n================== Error {} begin =====================".format( - basename - ) + f"\n================== Error {basename} begin =====================" ) if not os.path.isfile(logx): raise FileNotFoundError(f"{logx} is not a file") os.system(f"cat {logx}") print( - "================== Error {} end =====================\n".format( - basename - ) + f"================== Error {basename} end =====================\n" ) if tr0_ret != 0 or tr1_ret != 0: diff --git a/test/legacy_test/test_eager_deletion_delete_vars.py b/test/legacy_test/test_eager_deletion_delete_vars.py index 7420e15981c27..e61cccc83b201 100644 --- a/test/legacy_test/test_eager_deletion_delete_vars.py +++ b/test/legacy_test/test_eager_deletion_delete_vars.py @@ -110,15 +110,9 @@ def assertScopeVar(self, scope, persitables, non_persistables): if t._is_initialized(): outline_np_vars.append(name) + print(f'Non-alive persistable vars {outline_p_vars} in {persitables}') print( - 'Non-alive persistable vars {} in {}'.format( - outline_p_vars, persitables - ) - ) - print( - 'Alive non-persistable vars {} in {}'.format( - outline_np_vars, non_persistables - ) + f'Alive non-persistable vars {outline_np_vars} in {non_persistables}' ) self.assertEqual(len(outline_p_vars), 0) self.assertEqual(len(outline_np_vars), 0) diff --git a/test/legacy_test/test_fused_dropout_add_op.py b/test/legacy_test/test_fused_dropout_add_op.py index 6466775f432da..699d44bb37891 100644 --- a/test/legacy_test/test_fused_dropout_add_op.py +++ b/test/legacy_test/test_fused_dropout_add_op.py @@ -101,8 +101,8 @@ def setUp(self): self.mode = mode self.seed = seed - cls_name = "{}_{}_{}_{}_{}_{}".format( - parent.__name__, dtype, mode, str(training), str(p), str(seed) + cls_name = ( + f"{parent.__name__}_{dtype}_{mode}_{str(training)}_{str(p)}_{str(seed)}" ) TestFusedDropoutAddCase.__name__ = cls_name globals()[cls_name] = TestFusedDropoutAddCase diff --git a/test/legacy_test/test_generate_proposals_op.py b/test/legacy_test/test_generate_proposals_op.py index 7ce1aa5e4666b..901d009effc5b 100644 --- a/test/legacy_test/test_generate_proposals_op.py +++ b/test/legacy_test/test_generate_proposals_op.py @@ -208,9 +208,7 @@ def clip_tiled_boxes(boxes, im_shape, pixel_offset=True): has shape (N, 4 * num_tiled_boxes).""" assert ( boxes.shape[1] % 4 == 0 - ), 'boxes.shape[1] is {:d}, but must be divisible by 4.'.format( - boxes.shape[1] - ) + ), f'boxes.shape[1] is {boxes.shape[1]:d}, but must be divisible by 4.' 
offset = 1 if pixel_offset else 0 # x1 >= 0 boxes[:, 0::4] = np.maximum( diff --git a/test/legacy_test/test_generator_dataloader.py b/test/legacy_test/test_generator_dataloader.py index 7de57eb3eb5ca..9216e5a437970 100644 --- a/test/legacy_test/test_generator_dataloader.py +++ b/test/legacy_test/test_generator_dataloader.py @@ -134,9 +134,9 @@ def run_main( for _ in range(EPOCH_NUM): step = 0 for d in py_reader(): - assert len(d) == len(places), "{} != {}".format( - len(d), len(places) - ) + assert len(d) == len( + places + ), f"{len(d)} != {len(places)}" for i, item in enumerate(d): image = item['image'] label = item['label'] diff --git a/test/legacy_test/test_imperative_resnet.py b/test/legacy_test/test_imperative_resnet.py index b91f840a57bf0..edcc1cf39cba0 100644 --- a/test/legacy_test/test_imperative_resnet.py +++ b/test/legacy_test/test_imperative_resnet.py @@ -171,9 +171,7 @@ def __init__(self, layers=50, class_dim=102, use_cudnn=True): supported_layers = [50, 101, 152] assert ( layers in supported_layers - ), "supported layers are {} but input layer is {}".format( - supported_layers, layers - ) + ), f"supported layers are {supported_layers} but input layer is {layers}" if layers == 50: depth = [3, 4, 6, 3] diff --git a/test/legacy_test/test_imperative_se_resnext.py b/test/legacy_test/test_imperative_se_resnext.py index 24bc9e56d0e1d..51ca82499b629 100644 --- a/test/legacy_test/test_imperative_se_resnext.py +++ b/test/legacy_test/test_imperative_se_resnext.py @@ -199,9 +199,7 @@ def __init__(self, layers=50, class_dim=102): supported_layers = [50, 101, 152] assert ( layers in supported_layers - ), "supported layers are {} but input layer is {}".format( - supported_layers, layers - ) + ), f"supported layers are {supported_layers} but input layer is {layers}" if layers == 50: cardinality = 32 diff --git a/test/legacy_test/test_inplace.py b/test/legacy_test/test_inplace.py index 5b53b55f5f96a..676977ba2ac48 100644 --- a/test/legacy_test/test_inplace.py +++ b/test/legacy_test/test_inplace.py @@ -56,9 +56,7 @@ def test_backward_error(self): loss = paddle.nn.functional.relu(var_c + var_d) with self.assertRaisesRegex( RuntimeError, - "received tensor_version:{} != wrapper_version_snapshot:{}".format( - 1, 0 - ), + f"received tensor_version:{1} != wrapper_version_snapshot:{0}", ): loss.backward() @@ -173,9 +171,7 @@ def test_backward_error(self): loss = paddle.nn.functional.relu(var_c) with self.assertRaisesRegex( RuntimeError, - "received tensor_version:{} != wrapper_version_snapshot:{}".format( - 1, 0 - ), + f"received tensor_version:{1} != wrapper_version_snapshot:{0}", ): loss.backward() @@ -890,9 +886,7 @@ def test_backward_error(self): loss = paddle.nn.functional.relu(var_c) with self.assertRaisesRegex( RuntimeError, - "received tensor_version:{} != wrapper_version_snapshot:{}".format( - 3, 0 - ), + f"received tensor_version:{3} != wrapper_version_snapshot:{0}", ): loss.backward() @@ -981,9 +975,7 @@ def test_backward_error(self): loss = paddle.nn.functional.relu(var_c) with self.assertRaisesRegex( RuntimeError, - "received tensor_version:{} != wrapper_version_snapshot:{}".format( - 2, 0 - ), + f"received tensor_version:{2} != wrapper_version_snapshot:{0}", ): loss.backward() @@ -1059,9 +1051,7 @@ def test_backward_error(self): loss = paddle.nn.functional.relu(var_c) with self.assertRaisesRegex( RuntimeError, - "received tensor_version:{} != wrapper_version_snapshot:{}".format( - 2, 0 - ), + f"received tensor_version:{2} != wrapper_version_snapshot:{0}", ): loss.backward() @@ -1357,9 
+1347,7 @@ def test_backward_error(self): loss = paddle.nn.functional.relu(var_c) with self.assertRaisesRegex( RuntimeError, - "received tensor_version:{} != wrapper_version_snapshot:{}".format( - 2, 0 - ), + f"received tensor_version:{2} != wrapper_version_snapshot:{0}", ): loss.backward() @@ -1401,9 +1389,7 @@ def test_backward_error(self): loss = paddle.nn.functional.relu(var_c) with self.assertRaisesRegex( RuntimeError, - "received tensor_version:{} != wrapper_version_snapshot:{}".format( - 2, 0 - ), + f"received tensor_version:{2} != wrapper_version_snapshot:{0}", ): loss.backward() diff --git a/test/legacy_test/test_layers.py b/test/legacy_test/test_layers.py index 1055a7d8695d4..5d8087e7138d3 100644 --- a/test/legacy_test/test_layers.py +++ b/test/legacy_test/test_layers.py @@ -1509,9 +1509,7 @@ def test_all_layers(self): dy_result_value, rtol=1e-05, atol=0, - err_msg='Result of function [{}] compare failed'.format( - method.__name__ - ), + err_msg=f'Result of function [{method.__name__}] compare failed', ) continue @@ -1519,9 +1517,7 @@ def test_all_layers(self): np.testing.assert_array_equal( static_result[0], dy_result_value, - err_msg='Result of function [{}] not equal'.format( - method.__name__ - ), + err_msg=f'Result of function [{method.__name__}] not equal', ) def _get_np_data(self, shape, dtype, append_batch_size=True): diff --git a/test/legacy_test/test_lstm_cudnn_op.py b/test/legacy_test/test_lstm_cudnn_op.py index e36d4b01a866f..2d61b7c8f9a2d 100644 --- a/test/legacy_test/test_lstm_cudnn_op.py +++ b/test/legacy_test/test_lstm_cudnn_op.py @@ -384,7 +384,7 @@ def __init__( else: raise ValueError( "direction should be forward, backward or bidirectional, " - "received direction = {}".format(direction) + f"received direction = {direction}" ) self.input_size = input_size diff --git a/test/legacy_test/test_multi_dot_op.py b/test/legacy_test/test_multi_dot_op.py index c18ee94fb01e6..28f8dde3c05c7 100644 --- a/test/legacy_test/test_multi_dot_op.py +++ b/test/legacy_test/test_multi_dot_op.py @@ -318,9 +318,7 @@ def test_out(self): expected_result, rtol=1e-05, atol=1e-05, - err_msg='two value is {}\n{}, check diff!'.format( - np_res, expected_result - ), + err_msg=f'two value is {np_res}\n{expected_result}, check diff!', ) def test_dygraph_without_out(self): diff --git a/test/legacy_test/test_multiprocess_dataloader_iterable_dataset_static.py b/test/legacy_test/test_multiprocess_dataloader_iterable_dataset_static.py index cf7ff971ca711..845f4b0e80582 100644 --- a/test/legacy_test/test_multiprocess_dataloader_iterable_dataset_static.py +++ b/test/legacy_test/test_multiprocess_dataloader_iterable_dataset_static.py @@ -137,9 +137,7 @@ def run_main(self, num_workers, places, persistent_workers): for i in range(EPOCH_NUM): step = 0 for d in dataloader: - assert len(d) == len(places), "{} != {}".format( - len(d), len(places) - ) + assert len(d) == len(places), f"{len(d)} != {len(places)}" for i, item in enumerate(d): image = item['image'] label = item['label'] @@ -239,9 +237,7 @@ def run_main(self, num_workers, places, persistent_workers): for i in range(EPOCH_NUM): step = 0 for d in dataloader: - assert len(d) == len(places), "{} != {}".format( - len(d), len(places) - ) + assert len(d) == len(places), f"{len(d)} != {len(places)}" for i, item in enumerate(d): image = item['image'] label = item['label'] diff --git a/test/legacy_test/test_multiprocess_dataloader_static.py b/test/legacy_test/test_multiprocess_dataloader_static.py index fed4534c52a02..2d465fd818df0 100644 --- 
a/test/legacy_test/test_multiprocess_dataloader_static.py +++ b/test/legacy_test/test_multiprocess_dataloader_static.py @@ -137,9 +137,7 @@ def run_main(self, num_workers, places, persistent_workers): for _ in range(EPOCH_NUM): step = 0 for d in dataloader: - assert len(d) == len(places), "{} != {}".format( - len(d), len(places) - ) + assert len(d) == len(places), f"{len(d)} != {len(places)}" for i, item in enumerate(d): image = item['image'] label = item['label'] @@ -298,9 +296,7 @@ def run_main(self, num_workers, places, persistent_workers): for _ in range(EPOCH_NUM): step = 0 for d in dataloader: - assert len(d) == len(places), "{} != {}".format( - len(d), len(places) - ) + assert len(d) == len(places), f"{len(d)} != {len(places)}" for i, item in enumerate(d): image = item['image'] label = item['label'] diff --git a/test/legacy_test/test_ops_nms.py b/test/legacy_test/test_ops_nms.py index 8b95329eb56d3..0e6a5d9545543 100644 --- a/test/legacy_test/test_ops_nms.py +++ b/test/legacy_test/test_ops_nms.py @@ -227,9 +227,7 @@ def fun(x): np.testing.assert_array_equal( origin, res, - err_msg='origin out: {}\n inference model out: {}\n'.format( - origin, res - ), + err_msg=f'origin out: {origin}\n inference model out: {res}\n', ) def test_matrix_nms_dynamic(self): diff --git a/test/legacy_test/test_pylayer_op.py b/test/legacy_test/test_pylayer_op.py index 3f8b14f06d1f0..70bf6d947b37b 100644 --- a/test/legacy_test/test_pylayer_op.py +++ b/test/legacy_test/test_pylayer_op.py @@ -467,9 +467,7 @@ def forward(self, data): z = layer(data) with self.assertRaisesRegex( RuntimeError, - "received tensor_version:{} != wrapper_version_snapshot:{}".format( - 1, 0 - ), + f"received tensor_version:{1} != wrapper_version_snapshot:{0}", ): z.backward() diff --git a/test/legacy_test/test_run.py b/test/legacy_test/test_run.py index d870e346cd269..e0ec7c9657fb5 100644 --- a/test/legacy_test/test_run.py +++ b/test/legacy_test/test_run.py @@ -89,9 +89,7 @@ def test_collective_1(self): def test_collective_2(self): log_dir = tempfile.TemporaryDirectory() - args = "--job_id test2 --devices 0,1,2 --log_dir {}".format( - log_dir.name - ) + args = f"--job_id test2 --devices 0,1,2 --log_dir {log_dir.name}" p = self.pdrun(args) p.wait() self.assertTrue(p.poll() == 0) @@ -166,11 +164,7 @@ def test_ps_1(self): def test_ps_2(self): log_dir = tempfile.TemporaryDirectory() - args = ( - "--job_id ps2 --server_num=2 --trainer_num=2 --log_dir {}".format( - log_dir.name - ) - ) + args = f"--job_id ps2 --server_num=2 --trainer_num=2 --log_dir {log_dir.name}" p = self.pdrun(args) p.wait() self.assertTrue(p.poll() == 0) diff --git a/test/legacy_test/test_sample_logits_op.py b/test/legacy_test/test_sample_logits_op.py index 4f29b62ae1744..64c70b5a8a07c 100644 --- a/test/legacy_test/test_sample_logits_op.py +++ b/test/legacy_test/test_sample_logits_op.py @@ -76,19 +76,13 @@ def test_check_output(self): ), f"Samples dtype is {Samples.dtype}, not int64" assert ( Probabilities.dtype == np.float64 - ), "Probabilities dtype is {}, not float64".format( - Probabilities.dtype - ) + ), f"Probabilities dtype is {Probabilities.dtype}, not float64" assert ( SampledLogits.dtype == np.float64 - ), "SampledLogits dtype is {}, not float64".format( - SampledLogits.dtype - ) + ), f"SampledLogits dtype is {SampledLogits.dtype}, not float64" assert ( SampledLabels.dtype == np.int64 - ), "SampledLabels dtype is {}, not int64".format( - SampledLabels.dtype - ) + ), f"SampledLabels dtype is {SampledLabels.dtype}, not int64" assert Samples.shape == (self.bs, 
self.NT + self.S) assert Probabilities.shape == (self.bs, self.NT + self.S) diff --git a/test/legacy_test/test_signal.py b/test/legacy_test/test_signal.py index 013ea22fe6f51..1d86f15f51095 100644 --- a/test/legacy_test/test_signal.py +++ b/test/legacy_test/test_signal.py @@ -73,9 +73,7 @@ def normalize(S, norm=np.inf, axis=0, threshold=None, fill=None): threshold = tiny(S) elif threshold <= 0: - raise Exception( - "threshold={} must be strictly " "positive".format(threshold) - ) + raise Exception(f"threshold={threshold} must be strictly " "positive") if fill not in [None, False, True]: raise Exception(f"fill={fill} must be None or boolean") @@ -213,14 +211,13 @@ def dtype_r2c(d, default=np.complex64): def frame(x, frame_length, hop_length, axis=-1): if not isinstance(x, np.ndarray): raise Exception( - "Input must be of type numpy.ndarray, " - "given type(x)={}".format(type(x)) + "Input must be of type numpy.ndarray, " f"given type(x)={type(x)}" ) if x.shape[axis] < frame_length: raise Exception( - "Input is too short (n={:d})" - " for frame_length={:d}".format(x.shape[axis], frame_length) + f"Input is too short (n={x.shape[axis]:d})" + f" for frame_length={frame_length:d}" ) if hop_length < 1: @@ -228,18 +225,14 @@ def frame(x, frame_length, hop_length, axis=-1): if axis == -1 and not x.flags["F_CONTIGUOUS"]: print( - "librosa.util.frame called with axis={} " - "on a non-contiguous input. This will result in a copy.".format( - axis - ) + f"librosa.util.frame called with axis={axis} " + "on a non-contiguous input. This will result in a copy." ) x = np.asfortranarray(x) elif axis == 0 and not x.flags["C_CONTIGUOUS"]: print( - "librosa.util.frame called with axis={} " - "on a non-contiguous input. This will result in a copy.".format( - axis - ) + f"librosa.util.frame called with axis={axis} " + "on a non-contiguous input. This will result in a copy." ) x = np.ascontiguousarray(x) @@ -274,9 +267,7 @@ def pad_center(data, size, axis=-1, **kwargs): if lpad < 0: raise Exception( - ("Target size ({:d}) must be " "at least input size ({:d})").format( - size, n - ) + f"Target size ({size:d}) must be " f"at least input size ({n:d})" ) return np.pad(data, lengths, **kwargs) @@ -295,9 +286,7 @@ def get_window(window, Nx, fftbins=True): if len(window) == Nx: return np.asarray(window) - raise Exception( - "Window size mismatch: " "{:d} != {:d}".format(len(window), Nx) - ) + raise Exception("Window size mismatch: " f"{len(window):d} != {Nx:d}") else: raise Exception(f"Invalid window specification: {window}") @@ -350,18 +339,14 @@ def stft( if center: if n_fft > y.shape[-1]: print( - "n_fft={} is too small for input signal of length={}".format( - n_fft, y.shape[-1] - ) + f"n_fft={n_fft} is too small for input signal of length={y.shape[-1]}" ) y = np.pad(y, int(n_fft // 2), mode=pad_mode) elif n_fft > y.shape[-1]: raise Exception( - "n_fft={} is too large for input signal of length={}".format( - n_fft, y.shape[-1] - ) + f"n_fft={n_fft} is too large for input signal of length={y.shape[-1]}" ) # Window the time series. 
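
The test_signal.py hunks above cover the less mechanical side of the UP032 rewrite: a format spec such as {:d} or {:.2f} moves inside the f-string placeholder unchanged, and implicit concatenation of adjacent string literals keeps working once the pieces become f-strings. A minimal sketch of that pattern, using illustrative values rather than anything taken from the Paddle tests:

    # Format specs ride along unchanged ('{:d}' -> '{expr:d}'), and adjacent
    # string literals are still concatenated at compile time. Values below are
    # placeholders, not data from the test suite.
    n_fft, signal_len = 512, 256

    old_msg = (
        "n_fft={} is too large "
        "for input signal of length={:d}".format(n_fft, signal_len)
    )
    new_msg = (
        f"n_fft={n_fft} is too large "
        f"for input signal of length={signal_len:d}"
    )

    assert old_msg == new_msg  # the rewrite is purely syntactic
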
diff --git a/test/legacy_test/test_static_save_load.py b/test/legacy_test/test_static_save_load.py index 09e204e62191e..b46f49dbfefc9 100644 --- a/test/legacy_test/test_static_save_load.py +++ b/test/legacy_test/test_static_save_load.py @@ -926,9 +926,7 @@ def set_var(var, ndarray): for v in parameter_list: assert ( v.name in load_dict - ), "Can not find [{}] in model file [{}]".format( - v.name, parameter_file_name - ) + ), f"Can not find [{v.name}] in model file [{parameter_file_name}]" new_v = new_scope.find_var(v.name) set_var(new_v, load_dict[v.name]) @@ -949,9 +947,7 @@ def set_var(var, ndarray): for v in opt_list: assert ( v.name in load_dict - ), "Can not find [{}] in model file [{}]".format( - v.name, opt_file_name - ) + ), f"Can not find [{v.name}] in model file [{opt_file_name}]" new_v = new_scope.find_var(v.name) set_var(new_v, load_dict[v.name]) diff --git a/test/legacy_test/test_sync_batch_norm_op.py b/test/legacy_test/test_sync_batch_norm_op.py index 68cb93c31d91e..0375ee7c52776 100644 --- a/test/legacy_test/test_sync_batch_norm_op.py +++ b/test/legacy_test/test_sync_batch_norm_op.py @@ -216,9 +216,7 @@ def _compare_impl(self, place, layout, only_forward): for id in range(core.get_cuda_device_count()): filepath = os.path.join( self.data_dir.name, - 'input_{}_{}_{}_{}.npy'.format( - id, only_forward, str(self.dtype.__name__), layout - ), + f'input_{id}_{only_forward}_{str(self.dtype.__name__)}_{layout}.npy', ) np.save(filepath, data[id * stride : (id + 1) * stride]) data = create_or_get_tensor( @@ -282,9 +280,7 @@ def _compare_impl(self, place, layout, only_forward): bn_val = bn_fetches[i] file_path = os.path.join( self.data_dir.name, - 'output_{}_{}_{}_{}.npy'.format( - 0, only_forward, self.dtype.__name__, i - ), + f'output_{0}_{only_forward}_{self.dtype.__name__}_{i}.npy', ) sync_bn_val = np.load(file_path) if sync_bn_val.shape != bn_val.shape: diff --git a/test/legacy_test/test_translated_layer.py b/test/legacy_test/test_translated_layer.py index 6e87f24bb13e9..8d8a9d919f366 100644 --- a/test/legacy_test/test_translated_layer.py +++ b/test/legacy_test/test_translated_layer.py @@ -72,9 +72,7 @@ def train(layer, loader, loss_fn, opt): opt.step() opt.clear_grad() print( - "Epoch {} batch {}: loss = {}".format( - epoch_id, batch_id, np.mean(loss.numpy()) - ) + f"Epoch {epoch_id} batch {batch_id}: loss = {np.mean(loss.numpy())}" ) return loss @@ -158,9 +156,7 @@ def load_and_fine_tuning(self): np.testing.assert_array_equal( orig_loss.numpy(), loss.numpy(), - err_msg='original loss:\n{}\nnew loss:\n{}\n'.format( - orig_loss.numpy(), loss.numpy() - ), + err_msg=f'original loss:\n{orig_loss.numpy()}\nnew loss:\n{loss.numpy()}\n', ) def test_get_program(self): diff --git a/test/legacy_test/test_tril_triu_op.py b/test/legacy_test/test_tril_triu_op.py index fee9b6f95023a..a3add39f00f3f 100644 --- a/test/legacy_test/test_tril_triu_op.py +++ b/test/legacy_test/test_tril_triu_op.py @@ -100,16 +100,12 @@ def case_generator(op_type, Xshape, diagonal, expected, dtype): If arg`expercted` is 'success', it will register an Optest case and expect to pass. Otherwise, it will register an API case and check the expect failure. 
""" - cls_name = "{}_{}_shape_{}_diag_{}_dtype_{}".format( - expected, op_type, Xshape, diagonal, dtype + cls_name = ( + f"{expected}_{op_type}_shape_{Xshape}_diag_{diagonal}_dtype_{dtype}" ) errmsg = { - "diagonal: TypeError": "diagonal in {} must be a python Int".format( - op_type - ), - "input: ValueError": "x shape in {} must be at least 2-D".format( - op_type - ), + "diagonal: TypeError": f"diagonal in {op_type} must be a python Int", + "input: ValueError": f"x shape in {op_type} must be at least 2-D", } class FailureCase(unittest.TestCase): diff --git a/test/legacy_test/test_variable.py b/test/legacy_test/test_variable.py index 024527f1332c4..2daf7017bbcae 100644 --- a/test/legacy_test/test_variable.py +++ b/test/legacy_test/test_variable.py @@ -1046,16 +1046,12 @@ def test_static_graph_tensor_index_setitem_muti_dim(self): np.testing.assert_array_equal( array2, setitem_pp[0], - err_msg='\n numpy:{},\n paddle:{}'.format( - array2, setitem_pp[0] - ), + err_msg=f'\n numpy:{array2},\n paddle:{setitem_pp[0]}', ) np.testing.assert_array_equal( array3, setitem_pp[1], - err_msg='\n numpy:{},\n paddle:{}'.format( - array3, setitem_pp[1] - ), + err_msg=f'\n numpy:{array3},\n paddle:{setitem_pp[1]}', ) array = array[0] index1 = index1[0] @@ -1122,31 +1118,23 @@ def test_static_graph_array_index_muti_dim(self): np.testing.assert_array_equal( array2, setitem_pp[0], - err_msg='\n numpy:{},\n paddle:{}'.format( - array2, setitem_pp[0] - ), + err_msg=f'\n numpy:{array2},\n paddle:{setitem_pp[0]}', ) np.testing.assert_array_equal( array3, setitem_pp[1], - err_msg='\n numpy:{},\n paddle:{}'.format( - array3, setitem_pp[1] - ), + err_msg=f'\n numpy:{array3},\n paddle:{setitem_pp[1]}', ) np.testing.assert_array_equal( y_np1, setitem_pp[2], - err_msg='\n numpy:{},\n paddle:{}'.format( - y_np1, setitem_pp[2] - ), + err_msg=f'\n numpy:{y_np1},\n paddle:{setitem_pp[2]}', ) np.testing.assert_array_equal( y_np2, setitem_pp[3], - err_msg='\n numpy:{},\n paddle:{}'.format( - y_np2, setitem_pp[3] - ), + err_msg=f'\n numpy:{y_np2},\n paddle:{setitem_pp[3]}', ) array = array[0] index1 = index1[0] @@ -1205,9 +1193,7 @@ def test_dygraph_array_index_muti_dim(self): np.testing.assert_array_equal( tensor1.numpy(), array1, - err_msg='\n numpy:{},\n paddle:{}'.format( - array1, tensor1.numpy() - ), + err_msg=f'\n numpy:{array1},\n paddle:{tensor1.numpy()}', ) # 1 dim setitem array2 = array.copy() @@ -1219,9 +1205,7 @@ def test_dygraph_array_index_muti_dim(self): np.testing.assert_array_equal( tensor2.numpy(), array2, - err_msg='\n numpy:{},\n paddle:{}'.format( - array2, tensor2.numpy() - ), + err_msg=f'\n numpy:{array2},\n paddle:{tensor2.numpy()}', ) array = array[0] diff --git a/test/legacy_test/test_view_op_reuse_allocation.py b/test/legacy_test/test_view_op_reuse_allocation.py index ea48c9addb5b3..0b99f42a6bab4 100644 --- a/test/legacy_test/test_view_op_reuse_allocation.py +++ b/test/legacy_test/test_view_op_reuse_allocation.py @@ -80,9 +80,7 @@ def test_backward_error(self): loss = paddle.nn.functional.relu(var_c) with self.assertRaisesRegex( RuntimeError, - "received tensor_version:{} != wrapper_version_snapshot:{}".format( - 1, 0 - ), + f"received tensor_version:{1} != wrapper_version_snapshot:{0}", ): loss.backward() diff --git a/test/mkldnn/test_onnx_format_quantization_mobilenetv1.py b/test/mkldnn/test_onnx_format_quantization_mobilenetv1.py index 71e484f87569b..dec8a27bcd394 100644 --- a/test/mkldnn/test_onnx_format_quantization_mobilenetv1.py +++ b/test/mkldnn/test_onnx_format_quantization_mobilenetv1.py @@ 
-164,8 +164,8 @@ def tearDown(self): def cache_unzipping(self, target_folder, zip_path): if not os.path.exists(target_folder): - cmd = 'mkdir {0} && tar xf {1} -C {0}'.format( - target_folder, zip_path + cmd = ( + f'mkdir {target_folder} && tar xf {zip_path} -C {target_folder}' ) os.system(cmd) diff --git a/test/ps/static_gpubox_trainer.py b/test/ps/static_gpubox_trainer.py index 5940e3942cac1..9b4d07e9ef70d 100755 --- a/test/ps/static_gpubox_trainer.py +++ b/test/ps/static_gpubox_trainer.py @@ -173,9 +173,7 @@ def dataset_train_loop(self, epoch): start_time = time.time() self.reader.load_into_memory() print( - "self.reader.load_into_memory cost :{} seconds".format( - time.time() - start_time - ) + f"self.reader.load_into_memory cost :{time.time() - start_time} seconds" ) begin_pass_time = time.time() diff --git a/test/quantization/quant2_int8_image_classification_comparison.py b/test/quantization/quant2_int8_image_classification_comparison.py index 5c9954df91118..71505e7f84ee6 100644 --- a/test/quantization/quant2_int8_image_classification_comparison.py +++ b/test/quantization/quant2_int8_image_classification_comparison.py @@ -113,9 +113,7 @@ def reader(): while step < num: fp.seek(imgs_offset + img_size * step) img = fp.read(img_size) - img = struct.unpack_from( - '{}f'.format(img_ch * img_w * img_h), img - ) + img = struct.unpack_from(f'{img_ch * img_w * img_h}f', img) img = np.array(img) img.shape = (img_ch, img_w, img_h) fp.seek(labels_offset + label_size * step) @@ -310,17 +308,11 @@ def _predict( return outputs, acc1_avg, acc5_avg, fps_avg, latency_avg def _print_performance(self, title, fps, lat): - _logger.info( - '{}: avg fps: {:.2f}, avg latency: {:.4f} ms'.format( - title, fps, lat - ) - ) + _logger.info(f'{title}: avg fps: {fps:.2f}, avg latency: {lat:.4f} ms') def _print_accuracy(self, title, acc1, acc5): _logger.info( - '{}: avg top1 accuracy: {:.4f}, avg top5 accuracy: {:.4f}'.format( - title, acc1, acc5 - ) + f'{title}: avg top1 accuracy: {acc1:.4f}, avg top5 accuracy: {acc5:.4f}' ) def _summarize_performance(self, int8_fps, int8_lat, fp32_fps, fp32_lat): diff --git a/test/quantization/quant2_int8_lstm_model.py b/test/quantization/quant2_int8_lstm_model.py index 5bbb378e9c35e..8cfa3ab04666e 100644 --- a/test/quantization/quant2_int8_lstm_model.py +++ b/test/quantization/quant2_int8_lstm_model.py @@ -251,21 +251,15 @@ def test_lstm_model(self): ) print( - "FP32: fps {}, hx_acc {}, ctc_acc {}".format( - fp32_fps, fp32_hx_acc, fp32_ctc_acc - ) + f"FP32: fps {fp32_fps}, hx_acc {fp32_hx_acc}, ctc_acc {fp32_ctc_acc}" ) print( - "PTQ_INT8: fps {}, hx_acc {}, ctc_acc {}".format( - int8_fps, int8_hx_acc, int8_ctc_acc - ) + f"PTQ_INT8: fps {int8_fps}, hx_acc {int8_hx_acc}, ctc_acc {int8_ctc_acc}" ) print( - "QAT: fps {}, hx_acc {}, ctc_acc {}".format( - quant_fps, quant_hx_acc, quant_ctc_acc - ) + f"QAT: fps {quant_fps}, hx_acc {quant_hx_acc}, ctc_acc {quant_ctc_acc}" ) sys.stdout.flush() diff --git a/test/quantization/quant_int8_image_classification_comparison.py b/test/quantization/quant_int8_image_classification_comparison.py index cc0a6ad32ffc2..7d04939ee3731 100644 --- a/test/quantization/quant_int8_image_classification_comparison.py +++ b/test/quantization/quant_int8_image_classification_comparison.py @@ -92,9 +92,7 @@ def reader(): while step < num: fp.seek(imgs_offset + img_size * step) img = fp.read(img_size) - img = struct.unpack_from( - '{}f'.format(img_ch * img_w * img_h), img - ) + img = struct.unpack_from(f'{img_ch * img_w * img_h}f', img) img = np.array(img) img.shape 
= (img_ch, img_w, img_h) fp.seek(labels_offset + label_size * step) @@ -261,14 +259,10 @@ def _predict( def _summarize_performance(self, fp32_fps, fp32_lat, int8_fps, int8_lat): _logger.info('--- Performance summary ---') _logger.info( - 'FP32: avg fps: {:.2f}, avg latency: {:.4f} ms'.format( - fp32_fps, fp32_lat - ) + f'FP32: avg fps: {fp32_fps:.2f}, avg latency: {fp32_lat:.4f} ms' ) _logger.info( - 'INT8: avg fps: {:.2f}, avg latency: {:.4f} ms'.format( - int8_fps, int8_lat - ) + f'INT8: avg fps: {int8_fps:.2f}, avg latency: {int8_lat:.4f} ms' ) def _compare_accuracy( diff --git a/test/quantization/test_imperative_ptq.py b/test/quantization/test_imperative_ptq.py index 8ad6fa2832e93..189be58754c9f 100644 --- a/test/quantization/test_imperative_ptq.py +++ b/test/quantization/test_imperative_ptq.py @@ -91,8 +91,8 @@ def setUpClass(cls): def cache_unzipping(self, target_folder, zip_path): if not os.path.exists(target_folder): - cmd = 'mkdir {0} && tar xf {1} -C {0}'.format( - target_folder, zip_path + cmd = ( + f'mkdir {target_folder} && tar xf {zip_path} -C {target_folder}' ) os.system(cmd) diff --git a/test/quantization/test_imperative_qat_amp.py b/test/quantization/test_imperative_qat_amp.py index 4dba9c5421df4..29b7b28df5815 100644 --- a/test/quantization/test_imperative_qat_amp.py +++ b/test/quantization/test_imperative_qat_amp.py @@ -68,8 +68,8 @@ def tearDownClass(cls): def cache_unzipping(self, target_folder, zip_path): if not os.path.exists(target_folder): - cmd = 'mkdir {0} && tar xf {1} -C {0}'.format( - target_folder, zip_path + cmd = ( + f'mkdir {target_folder} && tar xf {zip_path} -C {target_folder}' ) os.system(cmd) diff --git a/test/quantization/test_post_training_quantization_lstm_model.py b/test/quantization/test_post_training_quantization_lstm_model.py index 81f68fd2b3986..76f861eb8bd7e 100644 --- a/test/quantization/test_post_training_quantization_lstm_model.py +++ b/test/quantization/test_post_training_quantization_lstm_model.py @@ -45,11 +45,7 @@ def setUp(self): try: os.system("mkdir -p " + self.int8_model_path) except Exception as e: - print( - "Failed to create {} due to {}".format( - self.int8_model_path, str(e) - ) - ) + print(f"Failed to create {self.int8_model_path} due to {str(e)}") sys.exit(-1) def tearDown(self): @@ -57,8 +53,8 @@ def tearDown(self): def cache_unzipping(self, target_folder, zip_path): if not os.path.exists(target_folder): - cmd = 'mkdir {0} && tar xf {1} -C {0}'.format( - target_folder, zip_path + cmd = ( + f'mkdir {target_folder} && tar xf {zip_path} -C {target_folder}' ) os.system(cmd) @@ -253,9 +249,7 @@ def run_test( data_path = os.path.join(data_path, data_name) print( - "Start FP32 inference for {} on {} samples ...".format( - model_name, infer_iterations - ) + f"Start FP32 inference for {model_name} on {infer_iterations} samples ..." ) (fp32_latency, fp32_acc) = self.run_program( fp32_model_path, @@ -287,9 +281,7 @@ def run_test( ) print( - "Start INT8 inference for {} on {} samples ...".format( - model_name, infer_iterations - ) + f"Start INT8 inference for {model_name} on {infer_iterations} samples ..." 
) (int8_latency, int8_acc) = self.run_program( self.int8_model_path, diff --git a/test/quantization/test_post_training_quantization_mnist.py b/test/quantization/test_post_training_quantization_mnist.py index cef001a8a60ef..2ff3f4e29ab68 100644 --- a/test/quantization/test_post_training_quantization_mnist.py +++ b/test/quantization/test_post_training_quantization_mnist.py @@ -61,11 +61,7 @@ def setUp(self): os.system("mkdir -p " + self.int8_model_path) os.system("mkdir -p " + self.cache_folder) except Exception as e: - print( - "Failed to create {} due to {}".format( - self.int8_model_path, str(e) - ) - ) + print(f"Failed to create {self.int8_model_path} due to {str(e)}") sys.exit(-1) def tearDown(self): @@ -73,8 +69,8 @@ def tearDown(self): def cache_unzipping(self, target_folder, zip_path): if not os.path.exists(target_folder): - cmd = 'mkdir {0} && tar xf {1} -C {0}'.format( - target_folder, zip_path + cmd = ( + f'mkdir {target_folder} && tar xf {zip_path} -C {target_folder}' ) os.system(cmd) @@ -99,9 +95,7 @@ def download(self, url, dirname, md5sum, save_name=None): retry += 1 else: raise RuntimeError( - "Cannot download {} within retry limit {}".format( - url, retry_limit - ) + f"Cannot download {url} within retry limit {retry_limit}" ) sys.stderr.write( f"Cache file {filename} not found, downloading {url} \n" @@ -146,9 +140,7 @@ def download_model(self, data_url, data_md5, folder_name): file_name = data_url.split('/')[-1] zip_path = os.path.join(self.cache_folder, file_name) print( - 'Data is downloaded at {}. File exists: {}'.format( - zip_path, os.path.exists(zip_path) - ) + f'Data is downloaded at {zip_path}. File exists: {os.path.exists(zip_path)}' ) data_cache_folder = os.path.join(self.cache_folder, folder_name) @@ -164,9 +156,7 @@ def run_program( infer_iterations, ): print( - "test model path: {}. File exists: {}".format( - model_path, os.path.exists(model_path) - ) + f"test model path: {model_path}. 
File exists: {os.path.exists(model_path)}" ) place = paddle.CPUPlace() exe = paddle.static.Executor(place) diff --git a/test/quantization/test_post_training_quantization_mobilenetv1.py b/test/quantization/test_post_training_quantization_mobilenetv1.py index 75caf3d9c908d..4500f61ca13dc 100644 --- a/test/quantization/test_post_training_quantization_mobilenetv1.py +++ b/test/quantization/test_post_training_quantization_mobilenetv1.py @@ -168,8 +168,8 @@ def tearDown(self): def cache_unzipping(self, target_folder, zip_path): if not os.path.exists(target_folder): - cmd = 'mkdir {0} && tar xf {1} -C {0}'.format( - target_folder, zip_path + cmd = ( + f'mkdir {target_folder} && tar xf {zip_path} -C {target_folder}' ) os.system(cmd) diff --git a/test/quantization/test_post_training_quantization_while.py b/test/quantization/test_post_training_quantization_while.py index d515fc59cd4f9..9a169b27c513a 100644 --- a/test/quantization/test_post_training_quantization_while.py +++ b/test/quantization/test_post_training_quantization_while.py @@ -59,22 +59,14 @@ def setUp(self): try: os.system("mkdir -p " + self.int8_model_path) except Exception as e: - print( - "Failed to create {} due to {}".format( - self.int8_model_path, str(e) - ) - ) + print(f"Failed to create {self.int8_model_path} due to {str(e)}") sys.exit(-1) def tearDown(self): try: os.system(f"rm -rf {self.int8_model_path}") except Exception as e: - print( - "Failed to delete {} due to {}".format( - self.int8_model_path, str(e) - ) - ) + print(f"Failed to delete {self.int8_model_path} due to {str(e)}") def cache_unzipping(self, target_folder, zip_path): cmd = f'tar xf {zip_path} -C {target_folder}' diff --git a/test/quantization/test_quant_post_quant_aware.py b/test/quantization/test_quant_post_quant_aware.py index a387f8bd230ee..0fe582306fbd7 100644 --- a/test/quantization/test_quant_post_quant_aware.py +++ b/test/quantization/test_quant_post_quant_aware.py @@ -109,9 +109,7 @@ def train(program): iter += 1 if iter % 100 == 0: logging.info( - 'train iter={}, avg loss {}, acc_top1 {}'.format( - iter, cost, top1 - ) + f'train iter={iter}, avg loss {cost}, acc_top1 {top1}' ) def test(program): @@ -124,16 +122,12 @@ def test(program): iter += 1 if iter % 100 == 0: logging.info( - 'eval iter={}, avg loss {}, acc_top1 {}'.format( - iter, cost, top1 - ) + f'eval iter={iter}, avg loss {cost}, acc_top1 {top1}' ) result[0].append(cost) result[1].append(top1) logging.info( - ' avg loss {}, acc_top1 {}'.format( - np.mean(result[0]), np.mean(result[1]) - ) + f' avg loss {np.mean(result[0])}, acc_top1 {np.mean(result[1])}' ) return np.mean(result[1]) diff --git a/test/quantization/test_weight_quantization_mobilenetv1.py b/test/quantization/test_weight_quantization_mobilenetv1.py index 0f8001a20edff..5ccee4b6c14bd 100644 --- a/test/quantization/test_weight_quantization_mobilenetv1.py +++ b/test/quantization/test_weight_quantization_mobilenetv1.py @@ -67,8 +67,8 @@ def download_model(self, model_name, data_url, data_md5): def cache_unzipping(self, target_folder, zip_path): if not os.path.exists(target_folder): - cmd = 'mkdir {0} && tar xf {1} -C {0}'.format( - target_folder, zip_path + cmd = ( + f'mkdir {target_folder} && tar xf {zip_path} -C {target_folder}' ) os.system(cmd) diff --git a/test/rnn/rnn_numpy.py b/test/rnn/rnn_numpy.py index dc115d0734ba5..d1a7ccbf02ecb 100644 --- a/test/rnn/rnn_numpy.py +++ b/test/rnn/rnn_numpy.py @@ -482,7 +482,7 @@ def __init__( else: raise ValueError( "direction should be forward, backward or bidirectional, " - "received 
direction = {}".format(direction) + f"received direction = {direction}" ) self.input_size = input_size @@ -526,7 +526,7 @@ def __init__( else: raise ValueError( "direction should be forward, backward or bidirectional, " - "received direction = {}".format(direction) + f"received direction = {direction}" ) self.input_size = input_size @@ -570,7 +570,7 @@ def __init__( else: raise ValueError( "direction should be forward, backward or bidirectional, " - "received direction = {}".format(direction) + f"received direction = {direction}" ) self.input_size = input_size diff --git a/test/tokenizer/bert_tokenizer.py b/test/tokenizer/bert_tokenizer.py index ac122e8c709bf..f8c7f4c55293a 100755 --- a/test/tokenizer/bert_tokenizer.py +++ b/test/tokenizer/bert_tokenizer.py @@ -327,11 +327,9 @@ def __init__( ): if not os.path.isfile(vocab_file): raise ValueError( - "Can't find a vocabulary file at path '{}'. To load the " + f"Can't find a vocabulary file at path '{vocab_file}'. To load the " "vocabulary from a pretrained model please use " - "`tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format( - vocab_file - ) + "`tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.vocab = self.load_vocabulary(vocab_file, unk_token=unk_token) self.do_lower_case = do_lower_case diff --git a/test/tokenizer/tokenizer_utils.py b/test/tokenizer/tokenizer_utils.py index 467d5e7cf0ced..5ce07e29b6d72 100644 --- a/test/tokenizer/tokenizer_utils.py +++ b/test/tokenizer/tokenizer_utils.py @@ -564,9 +564,7 @@ def save_pretrained(self, save_directory): """ assert not os.path.isfile( save_directory - ), "Saving directory ({}) should be a directory, not a file".format( - save_directory - ) + ), f"Saving directory ({save_directory}) should be a directory, not a file" os.makedirs(save_directory, exist_ok=True) tokenizer_config_file = os.path.join( @@ -632,9 +630,7 @@ def __getattr__(self, name): elif name.endswith('_token_id'): return self.vocab[self.special_tokens_map[name[:-3]]] raise AttributeError( - "'{}' object has no attribute '{}'".format( - type(self).__name__, name - ) + f"'{type(self).__name__}' object has no attribute '{name}'" ) def truncate_sequences( diff --git a/test/xpu/test_generate_proposals_v2_op_xpu.py b/test/xpu/test_generate_proposals_v2_op_xpu.py index 024d09603b7d9..dca37a4cd2e73 100644 --- a/test/xpu/test_generate_proposals_v2_op_xpu.py +++ b/test/xpu/test_generate_proposals_v2_op_xpu.py @@ -105,9 +105,7 @@ def clip_tiled_boxes(boxes, im_shape, pixel_offset=True): has shape (N, 4 * num_tiled_boxes).""" assert ( boxes.shape[1] % 4 == 0 - ), 'boxes.shape[1] is {:d}, but must be divisible by 4.'.format( - boxes.shape[1] - ) + ), f'boxes.shape[1] is {boxes.shape[1]:d}, but must be divisible by 4.' 
offset = 1 if pixel_offset else 0 # x1 >= 0 boxes[:, 0::4] = np.maximum( diff --git a/test/xpu/test_tril_triu_op_xpu.py b/test/xpu/test_tril_triu_op_xpu.py index 15371d894fa8d..c9279a391be7e 100644 --- a/test/xpu/test_tril_triu_op_xpu.py +++ b/test/xpu/test_tril_triu_op_xpu.py @@ -134,9 +134,7 @@ def test_errors1(self): data = paddle.static.data(shape=(20, 22), dtype='float32', name="data1") op_type = np.random.choice(['triu', 'tril']) errmsg = { - "diagonal: TypeError": "diagonal in {} must be a python Int".format( - op_type - ), + "diagonal: TypeError": f"diagonal in {op_type} must be a python Int", } expected = list(errmsg.keys())[0] with self.assertRaisesRegex( @@ -149,9 +147,7 @@ def test_errors2(self): data = paddle.static.data(shape=(200,), dtype='float32', name="data2") op_type = np.random.choice(['triu', 'tril']) errmsg = { - "input: ValueError": "x shape in {} must be at least 2-D".format( - op_type - ), + "input: ValueError": f"x shape in {op_type} must be at least 2-D", } expected = list(errmsg.keys())[0] with self.assertRaisesRegex( diff --git a/tools/analysisPyXml.py b/tools/analysisPyXml.py index f6c86619998aa..2f2d8b472c566 100644 --- a/tools/analysisPyXml.py +++ b/tools/analysisPyXml.py @@ -22,16 +22,8 @@ def analysisPyXml(rootPath, ut): xml_path = f'{rootPath}/build/pytest/{ut}/python-coverage.xml' - related_ut_map_file = '{}/build/ut_map/{}/related_{}.txt'.format( - rootPath, - ut, - ut, - ) - notrelated_ut_map_file = '{}/build/ut_map/{}/notrelated_{}.txt'.format( - rootPath, - ut, - ut, - ) + related_ut_map_file = f'{rootPath}/build/ut_map/{ut}/related_{ut}.txt' + notrelated_ut_map_file = f'{rootPath}/build/ut_map/{ut}/notrelated_{ut}.txt' tree = ElementTree.parse(xml_path) root = tree.getroot() error_files = [] diff --git a/tools/check_op_benchmark_result.py b/tools/check_op_benchmark_result.py index 8db2e850ae290..335f7715489b8 100644 --- a/tools/check_op_benchmark_result.py +++ b/tools/check_op_benchmark_result.py @@ -72,7 +72,7 @@ def check_speed_result(case_name, develop_data, pr_data, pr_result): develop_gpu_time = develop_data.get("gpu_time") if develop_gpu_time != 0.0: gpu_time_diff = (pr_gpu_time - develop_gpu_time) / develop_gpu_time - gpu_time_diff_str = "{:.5f}".format(gpu_time_diff * 100) + gpu_time_diff_str = f"{gpu_time_diff * 100:.5f}" else: gpu_time_diff = 0 gpu_time_diff_str = "" diff --git a/tools/check_op_desc.py b/tools/check_op_desc.py index 89a5e87af0b45..2eb8df32cc7c0 100644 --- a/tools/check_op_desc.py +++ b/tools/check_op_desc.py @@ -343,16 +343,12 @@ def print_desc_error_message(error_message): for name in Inputs_error.get(QUANT, {}): print( - " * The added Input '{}' is `quant`, need slim to review.".format( - name - ) + f" * The added Input '{name}' is `quant`, need slim to review." ) for name in Inputs_error.get(DEF, {}): print( - " * The added Input '{}' is `def`, need inference to review.".format( - name - ) + f" * The added Input '{name}' is `def`, need inference to review." ) # 2. print outputs error message @@ -375,16 +371,12 @@ def print_desc_error_message(error_message): for name in Outputs_error.get(QUANT, {}): print( - " * The added Output '{}' is `quant`, need slim to review.".format( - name - ) + f" * The added Output '{name}' is `quant`, need slim to review." ) for name in Outputs_error.get(DEF, {}): print( - " * The added Output '{}' is `def`, need inference to review.".format( - name - ) + f" * The added Output '{name}' is `def`, need inference to review." ) # 3. 
print attrs error message @@ -408,17 +400,13 @@ def print_desc_error_message(error_message): for name in attrs_error.get(QUANT, {}): # TODO(Wilber): print( - " * The added attr '{}' is `quant`, need slim to review.".format( - name - ) + f" * The added attr '{name}' is `quant`, need slim to review." ) for name in attrs_error.get(DEF, {}): # TODO(Wilber): print( - " * The added attr '{}' is `def`, need inference to review.".format( - name - ) + f" * The added attr '{name}' is `def`, need inference to review." ) @@ -435,22 +423,14 @@ def print_version_error_message(error_message): error_list = inputs_error.get(ADD, []) if error_list: for tup in error_list: - print( - " * The added input '{}' is not yet registered.".format( - tup[1] - ) - ) + print(f" * The added input '{tup[1]}' is not yet registered.") # 2. print outputs error message outputs_error = error_message.get(op_name, {}).get(OUTPUTS, {}) error_list = outputs_error.get(ADD, []) if error_list: for tup in error_list: - print( - " * The added output '{}' is not yet registered.".format( - tup[1] - ) - ) + print(f" * The added output '{tup[1]}' is not yet registered.") # 3. print attrs error message attrs_error = error_message.get(op_name, {}).get(ATTRS, {}) @@ -458,19 +438,13 @@ def print_version_error_message(error_message): if error_list: for tup in error_list: print( - " * The added attribute '{}' is not yet registered.".format( - tup[1] - ) + f" * The added attribute '{tup[1]}' is not yet registered." ) error_dic = ( error_message.get(op_name, {}).get(ATTRS, {}).get(CHANGE, {}) ) for key, val in error_dic.items(): - print( - " * The change of attribute '{}' is not yet registered.".format( - key - ) - ) + print(f" * The change of attribute '{key}' is not yet registered.") def print_repeat_process(): diff --git a/tools/count_api_without_core_ops.py b/tools/count_api_without_core_ops.py index 90d5e48f5167c..b90bdf0ca0e9b 100644 --- a/tools/count_api_without_core_ops.py +++ b/tools/count_api_without_core_ops.py @@ -38,9 +38,7 @@ def md5(doc): except UnicodeDecodeError as e: md5sum = None print( - "Error({}) occurred when `md5({})`, discard it.".format( - str(e), doc - ), + f"Error({str(e)}) occurred when `md5({doc})`, discard it.", file=sys.stderr, ) return md5sum @@ -111,9 +109,7 @@ def visit_member(parent_name, member, func): return else: raise RuntimeError( - "Unsupported generate signature of member, type {}".format( - str(type(member)) - ) + f"Unsupported generate signature of member, type {str(type(member))}" ) diff --git a/tools/coverage/gcda_clean.py b/tools/coverage/gcda_clean.py index 96f5b54a854ee..2abba39636d07 100644 --- a/tools/coverage/gcda_clean.py +++ b/tools/coverage/gcda_clean.py @@ -39,11 +39,7 @@ def get_pull(pull_id): repo = github.get_repo('PaddlePaddle/Paddle') except Exception as e: print(e) - print( - "get_repo error, retry {} times after {} secs.".format( - idx, idx * 10 - ) - ) + print(f"get_repo error, retry {idx} times after {idx * 10} secs.") else: break idx += 1 diff --git a/tools/coverage/python_coverage.py b/tools/coverage/python_coverage.py index 7132b119b4c0e..b28e3ef08b9d1 100644 --- a/tools/coverage/python_coverage.py +++ b/tools/coverage/python_coverage.py @@ -61,20 +61,12 @@ taken = int(taken) for _ in range(taken): - print( - 'BRDA:{},{},{},{}'.format( - line_number, 0, branch_index, line_hits - ) - ) + print(f'BRDA:{line_number},{0},{branch_index},{line_hits}') branch_index += 1 if line_missing_branches: for missing_branch in line_missing_branches.split(','): - print( - 
'BRDA:{},{},{},{}'.format( - line_number, 0, branch_index, 0 - ) - ) + print(f'BRDA:{line_number},{0},{branch_index},{0}') branch_index += 1 print(f'DA:{line_number},{line_hits}') diff --git a/tools/externalError/spider.py b/tools/externalError/spider.py index 7f57f81b584e5..a078cd2debbe8 100644 --- a/tools/externalError/spider.py +++ b/tools/externalError/spider.py @@ -361,10 +361,7 @@ def handle_data(self, data): status, code, desc = re.split('=|//', line.strip()) _Messages = allMessageDesc.messages.add() _Messages.code = int(code.strip(' ,')) - _Messages.message = "'{}'. {}".format( - status.strip(), - desc.strip(), - ) + _Messages.message = f"'{status.strip()}'. {desc.strip()}" CUFFTHTMLParser().feed(html) diff --git a/tools/get_single_test_cov.py b/tools/get_single_test_cov.py index a111ea61c6c89..a710e7792e4a5 100644 --- a/tools/get_single_test_cov.py +++ b/tools/get_single_test_cov.py @@ -67,15 +67,9 @@ def getFNDAFile(rootPath, test): def analysisFNDAFile(rootPath, test): - related_ut_map_file = '{}/build/ut_map/{}/related_{}.txt'.format( - rootPath, - test, - test, - ) - notrelated_ut_map_file = '{}/build/ut_map/{}/notrelated_{}.txt'.format( - rootPath, - test, - test, + related_ut_map_file = f'{rootPath}/build/ut_map/{test}/related_{test}.txt' + notrelated_ut_map_file = ( + f'{rootPath}/build/ut_map/{test}/notrelated_{test}.txt' ) os.system('touch %s' % related_ut_map_file) os.system('touch %s' % notrelated_ut_map_file) diff --git a/tools/parse_kernel_info.py b/tools/parse_kernel_info.py index 23106ab0d2ebb..19a70bbb22e33 100644 --- a/tools/parse_kernel_info.py +++ b/tools/parse_kernel_info.py @@ -80,7 +80,7 @@ def __str__(self): percent = float(self.num_ops_for_dtypes[dtype]) / float( num_floats ) - res += "({:.2f}%)".format(percent * 100) + res += f"({percent * 100:.2f}%)" else: res += f"({0:.2f}%)" res += " " diff --git a/tools/print_signatures.py b/tools/print_signatures.py index cdae91ece7023..ff03a33dc2e85 100644 --- a/tools/print_signatures.py +++ b/tools/print_signatures.py @@ -54,9 +54,7 @@ def md5(doc): except UnicodeDecodeError as e: md5sum = None print( - "Error({}) occurred when `md5({})`, discard it.".format( - str(e), doc - ), + f"Error({str(e)}) occurred when `md5({doc})`, discard it.", file=sys.stderr, ) @@ -319,9 +317,7 @@ def check_public_api(): cur_name = module + '.' 
+ member_name instance = eval(cur_name) doc_md5 = md5(instance.__doc__) - member_dict[cur_name] = "({}, ('document', '{}'))".format( - cur_name, doc_md5 - ) + member_dict[cur_name] = f"({cur_name}, ('document', '{doc_md5}'))" def check_allmodule_callable(): diff --git a/tools/sampcd_processor_utils.py b/tools/sampcd_processor_utils.py index 922d20ff5c8fb..a9ac35c0a7336 100644 --- a/tools/sampcd_processor_utils.py +++ b/tools/sampcd_processor_utils.py @@ -187,7 +187,7 @@ def __init__(self, **kwargs) -> None: for name, value in kwargs.items(): # check attr name if not (hasattr(self, name) or name in MetaResult.cls_map()): - raise KeyError('`{}` is not a valid result type.'.format(name)) + raise KeyError(f'`{name}` is not a valid result type.') setattr(self, name, value) @@ -207,7 +207,7 @@ def state(self) -> Result: return self.__unique_state def __str__(self) -> str: - return '{}, running time: {:.3f}s'.format(self.name, self.time) + return f'{self.name}, running time: {self.time:.3f}s' class DocTester: @@ -654,9 +654,7 @@ def check_old_style(docstrings_to_test: typing.Dict[str, str]): codeblock_name = codeblock['name'] codeblock_id = codeblock['id'] - docstring_name = '{}:{}'.format( - api_name, codeblock_name or codeblock_id - ) + docstring_name = f'{api_name}:{codeblock_name or codeblock_id}' old_style_apis.append(docstring_name) @@ -738,9 +736,7 @@ def get_test_results( docstring = doctester.ensemble_docstring( codeblock=codeblock['codes'] ) - docstring_name = '{}:{}'.format( - api_name, codeblock_name or codeblock_id - ) + docstring_name = f'{api_name}:{codeblock_name or codeblock_id}' docstrings_extracted.append( {'name': docstring_name, 'docstring': docstring}
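
A second recurring pattern in the hunks above is a template that reuses a positional index, as in the cache_unzipping helpers ('mkdir {0} && tar xf {1} -C {0}'): under UP032 the repeated index simply becomes the same expression interpolated twice in the f-string. A minimal sketch with placeholder paths, not values from the Paddle tests:

    # '{0}' used twice in the template becomes the same name repeated in the
    # f-string; the rendered command is identical.
    target_folder, zip_path = "models/mobilenet", "mobilenet.tar.gz"

    old_cmd = 'mkdir {0} && tar xf {1} -C {0}'.format(target_folder, zip_path)
    new_cmd = f'mkdir {target_folder} && tar xf {zip_path} -C {target_folder}'

    assert old_cmd == new_cmd  # behavior-preserving rewrite

Assuming a current Ruff release, rewrites of this shape can be generated mechanically with `ruff check --select UP032 --fix`; the re-wrapped parentheses seen in several hunks are presumably the formatter keeping the resulting single-line f-strings within the project's line-length limit.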