Merge branch 'main' into mitchish
epwalsh authored Jan 5, 2024
2 parents 7f356ac + df19554 commit f3a73dd
Showing 19 changed files with 930 additions and 94 deletions.
183 changes: 183 additions & 0 deletions configs/mitchish35.yaml
@@ -0,0 +1,183 @@
run_name: mitchish35-001
seed: 6198
dry_run: false

wandb:
  name: ${run_name}
  project: olmo-large
  group: mitchish35

model:
  d_model: 7168
  n_heads: 56
  n_layers: 56
  # mlp_ratio: 6
  mlp_hidden_size: 37888
  weight_tying: false
  alibi: false
  rope: true
  flash_attention: true
  attention_dropout: 0.0
  attention_layer_norm: false
  multi_query_attention: false
  include_bias: false
  block_type: sequential
  layer_norm_type: default
  layer_norm_with_affine: false
  bias_for_layer_norm: false
  attention_layer_norm_with_affine: false
  activation_type: swiglu
  residual_dropout: 0.0
  embedding_dropout: 0.0
  max_sequence_length: 2048
  vocab_size: 50280
  embedding_size: 50304
  eos_token_id: 0
  pad_token_id: 1
  init_device: meta
  init_fn: mitchell

compile: null

optimizer:
  name: adamw
  learning_rate: 1.5e-4
  weight_decay: 0.1
  decay_norm_and_bias: true
  decay_embeddings: true
  betas:
    - 0.9
    - 0.95
  metrics_log_interval: 10

scheduler:
  name: linear_with_warmup
  t_warmup: 5000
  alpha_f: 0.1
  grad_clip_warmup_steps: 1000
  grad_clip_warmup_factor: 5

tokenizer:
  identifier: tokenizers/allenai_eleuther-ai-gpt-neox-20b-pii-special.json
  truncate_direction: right

save_folder: ${oc.env:CHECKPOINTS_PATH}/${oc.env:SLURM_JOB_ID,${run_name}}
save_overwrite: false

# Sharded checkpoints (best for restarts)
save_interval: 500
save_num_checkpoints_to_keep: -1
sharded_checkpointer: local

# Unsharded checkpoints (for final storage)
save_interval_unsharded: null
save_num_unsharded_checkpoints_to_keep: -1

load_path: null

max_duration: 953674 # 2T tokens
global_train_batch_size: 1024
device_train_microbatch_size: 1

precision: amp_bf16

fsdp:
  wrapping_strategy: by_block
  precision: mixed

activation_checkpointing: whole_layer

max_grad_norm: 1.0
max_grad_norm_ratio: null

speed_monitor:
  window_size: 20

eval_interval: ${save_interval}
eval_subset_num_batches: -1
device_eval_batch_size: ${device_train_microbatch_size}
evaluators:
  - label: all-small-ppl-validation
    data:
      num_workers: 0
      drop_last: true
      # pin_memory: true
      # prefetch_factor: 1
      # persistent_workers: false
      # timeout: 0
      datasets:
        4chan-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/4chan/val.npy
        c4_100_domains-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/c4_100_domains/val.npy
        c4_en-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/c4_en/val.npy
        gab-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/gab/val.npy
        ice-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/ice/val.npy
        m2d2_s2orc-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/m2d2_s2orc/val.npy
        m2d2_wiki-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/m2d2_wiki/val.npy
        manosphere-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/manosphere/val.npy
        mc4_en-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/mc4_en/val.npy
        pile-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/pile/val.npy
        ptb-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/ptb/val.npy
        twitterAEE-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/twitterAEE/val.npy
        wikitext_103-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/wikitext_103/val.npy

  ##########################
  # Downstream evaluations #
  ##########################
  - label: piqa
    type: downstream

  - label: hellaswag
    type: downstream

  - label: winogrande
    type: downstream

  - label: openbook_qa
    type: downstream

  # - label: boolq # requires implementation of the pmi_dc matrix
  #   type: downstream

  - label: sciq
    type: downstream

  - label: arc_easy
    type: downstream

  # - label: arc_challenge # requires implementation of the pmi_dc matrix
  #   type: downstream

  - label: copa
    type: downstream

  - label: rte
    type: downstream

  - label: commitment_bank
    type: downstream

  - label: sst2
    type: downstream

data:
  paths: ${path.glob:${oc.env:DATA_PATH}/v1_5-sample/gpt-neox-20b-pii-special-longrunfix/*.npy}
  pad_direction: right
  num_workers: 0
  drop_last: true
  pin_memory: true
  prefetch_factor: 16
  persistent_workers: true
  timeout: 0
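
The max_duration comment in this config can be sanity-checked against the batch settings: 953,674 steps × 1,024 sequences per global batch × 2,048 tokens per sequence is just under 2e12 tokens, matching the "2T tokens" note. The following is a minimal Python sketch (illustrative, not part of this commit) that reproduces that arithmetic and adds a rough parameter count; the count assumes a fused QKV projection and an OLMo-style SwiGLU block whose hidden dimension is halved after the gating split, which is an assumption rather than something stated in the YAML.

```python
# Rough sanity checks for configs/mitchish35.yaml (illustrative only, not part of the commit).

# Token budget: steps * global batch size * sequence length.
steps, global_batch, seq_len = 953_674, 1_024, 2_048
tokens = steps * global_batch * seq_len
print(f"token budget ~ {tokens / 1e12:.3f}T")  # ~2.000T, matching the "2T tokens" comment

# Approximate parameter count, assuming (not stated in the YAML) a fused QKV projection
# (d_model -> 3*d_model) and SwiGLU with ff_proj: d_model -> mlp_hidden_size and
# ff_out: mlp_hidden_size // 2 -> d_model.
d_model, n_layers, mlp_hidden, emb = 7_168, 56, 37_888, 50_304
attn = d_model * 3 * d_model + d_model * d_model          # QKV + attention output projection
mlp = d_model * mlp_hidden + (mlp_hidden // 2) * d_model  # gate/up + down projection
embeddings = 2 * emb * d_model                            # untied input + output embeddings
total = n_layers * (attn + mlp) + embeddings
print(f"params ~ {total / 1e9:.1f}B")  # ~35.0B, consistent with the run name
```
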
183 changes: 183 additions & 0 deletions configs/mitchish50.yaml
@@ -0,0 +1,183 @@
run_name: mitchish50-001
seed: 6198
dry_run: false

wandb:
  name: ${run_name}
  project: olmo-large
  group: mitchish50

model:
  d_model: 8192
  n_heads: 64
  n_layers: 64
  # mlp_ratio: 6
  mlp_hidden_size: 40960
  weight_tying: false
  alibi: false
  rope: true
  flash_attention: true
  attention_dropout: 0.0
  attention_layer_norm: false
  multi_query_attention: false
  include_bias: false
  block_type: sequential
  layer_norm_type: default
  layer_norm_with_affine: false
  bias_for_layer_norm: false
  attention_layer_norm_with_affine: false
  activation_type: swiglu
  residual_dropout: 0.0
  embedding_dropout: 0.0
  max_sequence_length: 2048
  vocab_size: 50280
  embedding_size: 50304
  eos_token_id: 0
  pad_token_id: 1
  init_device: meta
  init_fn: mitchell

compile: null

optimizer:
  name: adamw
  learning_rate: 1.5e-4
  weight_decay: 0.1
  decay_norm_and_bias: true
  decay_embeddings: true
  betas:
    - 0.9
    - 0.95
  metrics_log_interval: 10

scheduler:
  name: linear_with_warmup
  t_warmup: 5000
  alpha_f: 0.1
  grad_clip_warmup_steps: 1000
  grad_clip_warmup_factor: 5

tokenizer:
  identifier: tokenizers/allenai_eleuther-ai-gpt-neox-20b-pii-special.json
  truncate_direction: right

save_folder: ${oc.env:CHECKPOINTS_PATH}/${oc.env:SLURM_JOB_ID,${run_name}}
save_overwrite: false

# Sharded checkpoints (best for restarts)
save_interval: 500
save_num_checkpoints_to_keep: -1
sharded_checkpointer: local

# Unsharded checkpoints (for final storage)
save_interval_unsharded: null
save_num_unsharded_checkpoints_to_keep: -1

load_path: null

max_duration: 953674 # 2T tokens
global_train_batch_size: 1024
device_train_microbatch_size: 1

precision: amp_bf16

fsdp:
  wrapping_strategy: by_block
  precision: mixed

activation_checkpointing: whole_layer

max_grad_norm: 1.0
max_grad_norm_ratio: null

speed_monitor:
  window_size: 20

eval_interval: ${save_interval}
eval_subset_num_batches: -1
device_eval_batch_size: ${device_train_microbatch_size}
evaluators:
  - label: all-small-ppl-validation
    data:
      num_workers: 0
      drop_last: true
      # pin_memory: true
      # prefetch_factor: 1
      # persistent_workers: false
      # timeout: 0
      datasets:
        4chan-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/4chan/val.npy
        c4_100_domains-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/c4_100_domains/val.npy
        c4_en-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/c4_en/val.npy
        gab-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/gab/val.npy
        ice-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/ice/val.npy
        m2d2_s2orc-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/m2d2_s2orc/val.npy
        m2d2_wiki-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/m2d2_wiki/val.npy
        manosphere-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/manosphere/val.npy
        mc4_en-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/mc4_en/val.npy
        pile-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/pile/val.npy
        ptb-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/ptb/val.npy
        twitterAEE-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/twitterAEE/val.npy
        wikitext_103-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/wikitext_103/val.npy

  ##########################
  # Downstream evaluations #
  ##########################
  - label: piqa
    type: downstream

  - label: hellaswag
    type: downstream

  - label: winogrande
    type: downstream

  - label: openbook_qa
    type: downstream

  # - label: boolq # requires implementation of the pmi_dc matrix
  #   type: downstream

  - label: sciq
    type: downstream

  - label: arc_easy
    type: downstream

  # - label: arc_challenge # requires implementation of the pmi_dc matrix
  #   type: downstream

  - label: copa
    type: downstream

  - label: rte
    type: downstream

  - label: commitment_bank
    type: downstream

  - label: sst2
    type: downstream

data:
  paths: ${path.glob:${oc.env:DATA_PATH}/v1_5-sample/gpt-neox-20b-pii-special-longrunfix/*.npy}
  pad_direction: right
  num_workers: 0
  drop_last: true
  pin_memory: true
  prefetch_factor: 16
  persistent_workers: true
  timeout: 0
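
The same back-of-the-envelope estimate applied to this larger config (d_model 8192, 64 layers, mlp_hidden_size 40960, untied embeddings of size 50304) lands at roughly 50B parameters, consistent with the mitchish50 name; the token budget is identical to mitchish35, since max_duration, global batch size, and sequence length are unchanged. A sketch under the same assumed architecture details as above:

```python
# Rough parameter count for configs/mitchish50.yaml, under the same assumptions as the
# mitchish35 sketch (fused QKV projection, SwiGLU with halved hidden dimension).
d_model, n_layers, mlp_hidden, emb = 8_192, 64, 40_960, 50_304
attn = d_model * 3 * d_model + d_model * d_model          # QKV + attention output projection
mlp = d_model * mlp_hidden + (mlp_hidden // 2) * d_model  # gate/up + down projection
total = n_layers * (attn + mlp) + 2 * emb * d_model       # layers + untied embeddings
print(f"params ~ {total / 1e9:.1f}B")  # ~50.2B
```
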