From 53ad2da1a8e6e79e0986ddfa3a45e1db6fdd491c Mon Sep 17 00:00:00 2001
From: Tian <121000916+SylarTiaNII@users.noreply.github.com>
Date: Mon, 13 May 2024 15:29:43 +0800
Subject: [PATCH] [LLM] add assertion for enable_stage1_overlap in lora mode
 (#8425)

---
 llm/finetune_generation.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/llm/finetune_generation.py b/llm/finetune_generation.py
index 6e4123b02df2..d0593660879d 100644
--- a/llm/finetune_generation.py
+++ b/llm/finetune_generation.py
@@ -462,6 +462,10 @@ def neft_post_hook(module, input, output):
         model.print_trainable_parameters()
 
     if model_args.lora:
+        if training_args.sharding_parallel_degree > 1:
+            assert (
+                "enable_stage1_overlap" not in training_args.sharding_parallel_config
+            ), "Currently not support enabling sharding_stage1_overlap in lora mode."
         if model_args.lora_path is None:
             target_modules = get_lora_target_modules(model)
             lora_config = LoRAConfig(
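
For context, a minimal runnable sketch of the guard this patch adds. The
TrainingArgs stand-in and check_lora_stage1_overlap helper below are
hypothetical; the field names and the assertion body come from the diff
above, and sharding_parallel_config is assumed here to be a plain string,
so the `in` test acts as a substring check.

# Sketch of the guard added by this patch, using a hypothetical stand-in
# for the trainer's arguments; only the two fields read by the assertion
# are modeled.
from dataclasses import dataclass

@dataclass
class TrainingArgs:
    sharding_parallel_degree: int = 1
    sharding_parallel_config: str = ""

def check_lora_stage1_overlap(args: TrainingArgs) -> None:
    # Mirrors the diff: once sharding is active (degree > 1), LoRA mode
    # must not run with stage1 overlap enabled.
    if args.sharding_parallel_degree > 1:
        assert (
            "enable_stage1_overlap" not in args.sharding_parallel_config
        ), "Currently not support enabling sharding_stage1_overlap in lora mode."

check_lora_stage1_overlap(TrainingArgs())       # degree 1: guard is skipped
check_lora_stage1_overlap(TrainingArgs(2, ""))  # sharded, overlap off: passes
try:
    check_lora_stage1_overlap(TrainingArgs(2, "enable_stage1_overlap"))
except AssertionError as err:
    print(err)  # sharded with overlap enabled: rejected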