Commit

add ci
Signed-off-by: Chen Cui <[email protected]>
cuichenx committed Nov 26, 2024
1 parent a71bfee commit 06ff291
Showing 2 changed files with 31 additions and 2 deletions.
.github/workflows/cicd-main.yml (29 additions, 0 deletions)
@@ -4361,6 +4361,34 @@ jobs:
           --pp_size 1 \
           --mbs 1 --packed
+
+  L2_NeMo_2_GPT_CLoRA_TP1PP1_MBS1_PACKED:
+    needs: [cicd-test-container-setup]
+    uses: ./.github/workflows/_test_template.yml
+    if: contains(fromJSON(needs.cicd-test-container-setup.outputs.test_to_run), 'L2_NeMo_2_GPT_CLoRA_TP1PP1_MBS1_PACKED') || needs.cicd-test-container-setup.outputs.all == 'true'
+    with:
+      RUNNER: self-hosted-azure
+      SCRIPT: |
+        python tests/collections/llm/gpt_finetuning.py \
+          --restore_path /home/TestData/nemo2_ckpt/llama_68M \
+          --devices 2 \
+          --max_steps 3 \
+          --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \
+          --peft canonical_lora \
+          --tp_size 1 \
+          --pp_size 1 \
+          --mbs 1 --packed
+
+        python tests/collections/llm/gpt_finetuning.py \
+          --restore_path /home/TestData/nemo2_ckpt/llama_68M \
+          --devices 2 \
+          --max_steps 6 \
+          --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \
+          --peft canonical_lora \
+          --tp_size 1 \
+          --pp_size 1 \
+          --mbs 1 --packed
+
   L2_NeMo_2_Mixtral_LoRA_EP2PP1_MBS2:
     needs: [cicd-test-container-setup]
     uses: ./.github/workflows/_test_template.yml
@@ -4604,6 +4632,7 @@ jobs:
       - L2_NeMo_2_GPT_LoRA_TP2PP1_MBS2
       - L2_NeMo_2_GPT_LoRA_TP1PP1_MBS1_PACKED
       - L2_NeMo_2_GPT_DoRA_TP1PP1_MBS1_PACKED
+      - L2_NeMo_2_GPT_CLoRA_TP1PP1_MBS1_PACKED
      - L2_NeMo_2_Mixtral_LoRA_EP2PP1_MBS2
      - L2_NeMo_2_Mixtral_LoRA_TP1PP1_MBS1
      - L2_NeMo_2_Mixtral_LoRA_TP2PP1_MBS1
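The new job mirrors the existing LoRA/DoRA packed-sequence tests: it runs the fine-tuning script twice with the same --experiment_dir, first for 3 steps and then for 6, which appears to exercise resume-from-checkpoint on top of a fresh run. The second hunk registers the job in what looks like the workflow's aggregate job list, so it gates the overall CI result. A minimal Python sketch of the two-phase pattern follows; the paths and flags come straight from the diff, while the local experiment directory is a stand-in for the ${{ github.run_id }} substitution, and running it assumes the test data and two GPUs are available:

# Sketch of the CI job's two-phase pattern: the first run trains 3 steps and
# writes a checkpoint; the second asks for 6 steps in the same experiment
# dir, so it should resume from step 3 instead of starting over.
import subprocess

common = [
    "python", "tests/collections/llm/gpt_finetuning.py",
    "--restore_path", "/home/TestData/nemo2_ckpt/llama_68M",
    "--devices", "2",
    "--experiment_dir", "/tmp/nemo2_gpt_finetune/local_run",  # stand-in for ${{ github.run_id }}
    "--peft", "canonical_lora",
    "--tp_size", "1",
    "--pp_size", "1",
    "--mbs", "1",
    "--packed",
]

subprocess.run(common + ["--max_steps", "3"], check=True)  # fresh run
subprocess.run(common + ["--max_steps", "6"], check=True)  # resumed run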
tests/collections/llm/gpt_finetuning.py (2 additions, 2 deletions)
@@ -94,7 +94,7 @@ def get_args():
         ),
     )
 
-    if args.peft in ['lora', 'dora']:
+    if args.peft in llm.peft.PEFT_STR2CLS.keys():
         peft = llm.peft.PEFT_STR2CLS[args.peft]()
     else:
         peft = None
@@ -105,7 +105,7 @@ def get_args():
     dolly = llm.DollyDataModule(
         seq_length=2048,
         micro_batch_size=args.mbs,
-        global_batch_size=8,
+        global_batch_size=4,
         num_workers=0,
         packed_sequence_specs=packed_sequence_specs,
     )
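On the Python side, the hard-coded ['lora', 'dora'] check becomes a lookup in llm.peft.PEFT_STR2CLS, so any PEFT scheme registered in that mapping, including the new canonical_lora, is picked up without further edits to the test. A minimal sketch of that string-to-class dispatch; the classes below are illustrative stand-ins, not NeMo's actual definitions, which live in nemo.collections.llm.peft:

# Illustrative registry in the style of llm.peft.PEFT_STR2CLS: CLI strings
# map to PEFT classes, and unknown names fall back to full fine-tuning.
class LoRA: ...
class DoRA: ...
class CanonicalLoRA: ...

PEFT_STR2CLS = {
    "lora": LoRA,
    "dora": DoRA,
    "canonical_lora": CanonicalLoRA,
}

def build_peft(name):
    # Mirrors the updated test: instantiate any registered scheme,
    # otherwise return None (no PEFT, i.e. full fine-tuning).
    return PEFT_STR2CLS[name]() if name in PEFT_STR2CLS else None

print(type(build_peft("canonical_lora")).__name__)  # CanonicalLoRA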
