#!/bin/bash
# finetune_lora_v1.sh -- version 1: baseline LoRA fine-tuning configuration
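#
# LoRA fine-tuning for this LLaVA fork (of haotian-liu/LLaVA): Vistral-7B-Chat
# as the language backbone, a multilingual SigLIP vision tower, and DeepSpeed
# ZeRO-3 for parameter/optimizer sharding, starting from a multimodal projector
# already pretrained to ./checkpoints/llava-vistral-7b-pretrain/mm_projector.bin.
#
# Assumptions: launched from the repository root (the relative paths to
# ./scripts/zero3.json, data/, and ./checkpoints/ depend on it), with
# data/vi_llava_train.json and data/images prepared locally.
# Effective global batch size = per_device_train_batch_size (8) x num_GPUs x
# gradient_accumulation_steps (1).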
deepspeed llava/train/train_mem.py \
    --lora_enable True --lora_r 128 --lora_alpha 256 --mm_projector_lr 1.25e-6 \
    --deepspeed ./scripts/zero3.json \
    --model_name_or_path Viet-Mistral/Vistral-7B-Chat \
    --version vistral \
    --data_path data/vi_llava_train.json \
    --image_folder data/images \
    --vision_tower google/siglip-base-patch16-256-multilingual \
    --pretrain_mm_mlp_adapter ./checkpoints/llava-vistral-7b-pretrain/mm_projector.bin \
    --mm_projector_type mlp2x_gelu \
    --mm_vision_select_layer -2 \
    --mm_use_im_start_end False \
    --mm_use_im_patch_token False \
    --image_aspect_ratio pad \
    --group_by_modality_length True \
    --bf16 True \
    --output_dir ./checkpoints/llava-vistral-7b-lora \
    --num_train_epochs 1 \
    --per_device_train_batch_size 8 \
    --per_device_eval_batch_size 4 \
    --gradient_accumulation_steps 1 \
    --evaluation_strategy "no" \
    --save_strategy "steps" \
    --save_steps 50000 \
    --save_total_limit 1 \
    --learning_rate 1.25e-5 \
    --weight_decay 0. \
    --warmup_ratio 0.03 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --tf32 True \
    --model_max_length 2048 \
    --gradient_checkpointing True \
    --dataloader_num_workers 4 \
    --lazy_preprocess True \
    --report_to wandb
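
# After training, ./checkpoints/llava-vistral-7b-lora contains LoRA adapter
# weights rather than a full merged model; merging into the base model (e.g.
# with scripts/merge_lora_weights.py from upstream LLaVA) is assumed to be a
# separate step, not something this script performs.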