#!/bin/bash
# Serialize kernel launches per device; commonly set for multi-GPU training
# so communication kernels are not delayed behind compute.
export CUDA_DEVICE_MAX_CONNECTIONS=1
DIR=$(pwd)

export MODEL="internlm/internlm-xcomposer2d5-7b"
# export DATA="path of data"
export DATA="data.txt"
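
# NOTE (assumed format; check the repo's finetune docs): data.txt is a meta
# file listing the annotation JSONs. With --given_num True below, each line is
# expected to pair a JSON path with the number of samples to draw from it, e.g.
#   path/to/sft_data.json 1000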

# Single-node, 8-GPU launch; for multi-node runs set NNODES, NODE_RANK and
# MASTER_ADDR appropriately on each node.
GPUS_PER_NODE=8
NNODES=1
NODE_RANK=0
MASTER_ADDR=localhost
MASTER_PORT=6001

DISTRIBUTED_ARGS="
    --nproc_per_node $GPUS_PER_NODE \
    --nnodes $NNODES \
    --node_rank $NODE_RANK \
    --master_addr $MASTER_ADDR \
    --master_port $MASTER_PORT
"
torchrun $DISTRIBUTED_ARGS finetune.py \
    --model_name_or_path $MODEL \
    --data_path $DATA \
    --given_num True \
    --bf16 True \
    --fix_vit True \
    --fix_sampler True \
    --use_lora True \
    --hd_num 18 \
    --output_dir output/finetune_lora \
    --num_train_epochs 1 \
    --batch_size 2 \
    --per_device_train_batch_size 1 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 8 \
    --evaluation_strategy "no" \
    --save_strategy "epoch" \
    --save_total_limit 1 \
    --learning_rate 5e-5 \
    --weight_decay 0.1 \
    --adam_beta2 0.95 \
    --warmup_ratio 0.01 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --report_to "none" \
    --max_length 16384 \
    --deepspeed ds_config_zero2.json \
    --gradient_checkpointing True
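
# Checkpoints are written to output/finetune_lora; with --save_strategy "epoch"
# and --save_total_limit 1, only the most recent (here: final) epoch's LoRA
# weights are kept.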