forked from axolotl-ai-cloud/axolotl
Merge pull request #1 from vinamrabenara/test-phi-3-model
Test phi 3 model
Showing 11 changed files with 326 additions and 24 deletions.
@@ -176,3 +176,9 @@ qlora-out/*
mlruns/*

/.quarto/
prepared-datasets/
submit.sh
*.out*

typings/
out/
@@ -0,0 +1,83 @@
base_model: microsoft/Phi-3-mini-4k-instruct
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: mhenrichsen/alpaca_2k_test
    type: alpaca

dataset_prepared_path:
val_set_size: 0
output_dir: ./phi-sft-out

sequence_len: 4096
sample_packing: true
pad_to_sequence_len: true
trust_remote_code: true

adapter:
lora_model_dir:
lora_r:
lora_alpha:
lora_dropout:
lora_target_linear:
lora_fan_in_fan_out:

wandb_project: phi3
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 2
micro_batch_size: 12
num_epochs: 2
optimizer: adamw_torch
adam_beta2: 0.95
adam_epsilon: 0.00001
max_grad_norm: 1.0
lr_scheduler: cosine
learning_rate: 0.000003

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: true

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 100
evals_per_epoch: 4
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.1
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: Phi3DecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
resize_token_embeddings_to_32x: true
special_tokens:
  pad_token: "<|endoftext|>"
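
The config above sets resize_token_embeddings_to_32x: true, which pads the model's embedding table up to the next multiple of 32 after the pad token is added. As a rough illustration only (a sketch assuming a standard Hugging Face transformers model and tokenizer, not the exact axolotl code path), that corresponds to something like:

# Sketch: round the vocabulary size up to a multiple of 32 and resize the
# embedding matrix accordingly. Assumes the model/tokenizer load as shown;
# this is not the axolotl implementation itself.
import math

from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "microsoft/Phi-3-mini-4k-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)

# Keep embedding dimensions friendly to tensor-core kernels.
target_size = math.ceil(len(tokenizer) / 32) * 32
model.resize_token_embeddings(target_size)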
@@ -0,0 +1,64 @@
base_model: microsoft/Phi-3-mini-4k-instruct
trust_remote_code: true
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
chat_template: phi_3

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: garage-bAInd/Open-Platypus
    type: alpaca:phi

dataset_prepared_path:
val_set_size: 0.01
output_dir: ./out

sequence_len: 4096
sample_packing: true
pad_to_sequence_len: true

adapter: lora
lora_model_dir:
lora_r: 64
lora_alpha: 32
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:

gradient_accumulation_steps: 1
micro_batch_size: 2
num_epochs: 1
optimizer: adamw_torch
adam_beta2: 0.95
adam_epsilon: 0.00001
max_grad_norm: 1.0
lr_scheduler: cosine
learning_rate: 5.0e-6

train_on_inputs: false
group_by_length: false
bf16: auto

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: True
early_stopping_patience: 3
logging_steps: 1
flash_attention: true

eval_steps: 1000
save_steps: 5000
eval_table_size: 2
eval_batch_size: 2
eval_sample_packing: false
eval_max_new_tokens: 32
eval_causal_lm_metrics: ["perplexity"]
do_causal_lm_eval: true

warmup_ratio: 0.2
debug: true
weight_decay: 0.1
resize_token_embeddings_to_32x: true
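
The adapter block above (lora_r: 64, lora_alpha: 32, lora_dropout: 0.05, lora_target_linear: true) maps roughly onto a PEFT LoraConfig. The sketch below illustrates that mapping, assuming a recent peft release that accepts target_modules="all-linear"; it is not how axolotl wires the adapter internally.

# Sketch of an equivalent PEFT setup for the adapter settings above.
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True
)

lora_config = LoraConfig(
    r=64,                         # lora_r
    lora_alpha=32,                # lora_alpha
    lora_dropout=0.05,            # lora_dropout
    target_modules="all-linear",  # lora_target_linear: true
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()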
@@ -0,0 +1,76 @@
"""callback to calculate perplexity as an evaluation metric."""
from typing import Dict, List, Optional

import torch
from torch import Tensor
from tqdm import tqdm
from transformers.modeling_outputs import CausalLMOutput
from transformers.modeling_utils import PreTrainedModel
from transformers.tokenization_utils import PreTrainedTokenizer


class Perplexity:
    """
    Calculate perplexity as defined in https://huggingface.co/docs/transformers/en/perplexity.
    This is a custom variant that doesn't re-tokenize the input or re-load the model.
    """

    def __init__(
        self,
        model: PreTrainedModel,
        tokenizer: PreTrainedTokenizer,
        max_seq_len: int,
        stride: int = 512,
    ) -> None:
        self.max_seq_len = max_seq_len
        self.stride = stride
        self.model = model
        self.tokenizer = tokenizer
        self.device = model.device
        self.name = "perplexity"

    def _feature_names(self) -> List[str]:
        return ["references"]

    def compute(
        self,
        references: Optional[List[str]] = None,
    ) -> Dict[str, float]:
        """
        Compute perplexity in a fixed length sliding window across the sequence.
        """
        assert references is not None, "Missing parameter: references"

        references_tokenized = self.tokenizer(
            references, return_tensors="pt", padding=True, truncation=True
        )
        input_ids: Tensor = references_tokenized["input_ids"]  # type: ignore
        input_ids = input_ids.to(self.device)

        sequence_length = input_ids.size(1)

        losses = []
        prev_end_loc = 0
        for begin_loc in tqdm(range(0, sequence_length, self.stride)):
            end_loc = min(begin_loc + self.max_seq_len, sequence_length)
            trg_len = end_loc - prev_end_loc
            input_ids_slice = input_ids[:, begin_loc:end_loc]
            labels_slice = input_ids_slice.clone()
            labels_slice[:, :-trg_len] = -100

            with torch.no_grad():
                outputs: CausalLMOutput = self.model(
                    input_ids=input_ids_slice, labels=labels_slice
                )

            losses.append(outputs.loss)

            prev_end_loc = end_loc
            if end_loc == sequence_length:
                break

        perplexity = torch.exp(torch.stack(losses).mean()).item()

        return {
            "score": perplexity,
        }
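
This class presumably backs the eval_causal_lm_metrics: ["perplexity"] setting in the LoRA config above: compute() slides a window of max_seq_len tokens over the tokenized references with the given stride, masks everything except the newly covered trg_len tokens with -100 so only they contribute to the loss, and exponentiates the mean loss. A minimal usage sketch follows; the module path of the new file is not shown in this capture, so the class is assumed to be in scope, and the gpt2 checkpoint is illustrative only.

# Usage sketch for the Perplexity class above (hypothetical model choice).
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token  # gpt2 ships without a pad token
model = AutoModelForCausalLM.from_pretrained("gpt2")

metric = Perplexity(model=model, tokenizer=tokenizer, max_seq_len=1024, stride=512)
result = metric.compute(references=["The quick brown fox jumps over the lazy dog."])
print(result["score"])  # perplexity of the reference text under the model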