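#!/usr/bin/env bash
# wikitext_bpe_extra_embed_finetune.sh
#
# Fine-tune the output embedding of a pretrained WikiText-103 BPE transformer
# LM with extra output embeddings ("3V", presumably ~3x the vocabulary size),
# allocated per token either by training-set frequency or by total training
# loss. Each allocation runs in two variants: all output embeddings
# re-initialized ("init") or the pretrained block preserved ("preserve").
# Each checkpoint is then evaluated on the validation set.
#
# Prerequisites: binarized data in data-bin/wikitext103-bpe, the pretrained
# checkpoint checkpoints/wikitext103-bpe/checkpoint_best.pt, and the
# allocation files under analysis/.
#
# Note: --finetune-out-embed, --num-extra-embed-file, --preserve-out-embed,
# --criterion agg_softmax, and --save-scores are custom options from this
# repository's fairseq fork, not stock fairseq.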
# allocation by training-set frequency, approx. 3V, all output embeddings re-initialized ("init")
python train.py --task language_modeling \
data-bin/wikitext103-bpe \
--save-dir checkpoints/wikitext103-bpe-extra-embed-3v-init-finetune \
--arch transformer_lm_wikibpe --restore-file checkpoints/wikitext103-bpe/checkpoint_best.pt \
--reset-optimizer --reset-dataloader --reset-meters \
--finetune-out-embed --num-extra-embed-file analysis/train_freq_num_extra_embed_3v.json \
--criterion agg_softmax \
--max-update 286000 --optimizer nag --lr 5e-2 --clip-norm 100 \
--max-tokens 3072 --update-freq 3 --tokens-per-sample 3072 --seed 1 \
--sample-break-mode none --skip-invalid-size-inputs-valid-test --ddp-backend=no_c10d --fp16
# continue training from the last checkpoint (no --reset-* flags, so optimizer state and meters are restored)
python train.py --task language_modeling \
data-bin/wikitext103-bpe \
--save-dir checkpoints/wikitext103-bpe-extra-embed-3v-init-finetune \
--arch transformer_lm_wikibpe --restore-file checkpoints/wikitext103-bpe-extra-embed-3v-init-finetune/checkpoint_last.pt \
--finetune-out-embed --num-extra-embed-file analysis/train_freq_num_extra_embed_3v.json \
--criterion agg_softmax \
--max-update 286000 --optimizer nag --lr 5e-2 --clip-norm 100 \
--max-tokens 3072 --update-freq 3 --tokens-per-sample 3072 --seed 1 \
--sample-break-mode none --skip-invalid-size-inputs-valid-test --ddp-backend=no_c10d --fp16
# allocation by training-set frequency, approx. 3V; preserve the pretrained embedding (first block), with a lower lr (1e-3) and dropout 0.1
python train.py --task language_modeling \
data-bin/wikitext103-bpe \
--save-dir checkpoints/wikitext103-bpe-extra-embed-3v-preserve-finetune \
--arch transformer_lm_wikibpe --restore-file checkpoints/wikitext103-bpe/checkpoint_best.pt \
--reset-optimizer --reset-dataloader --reset-meters \
--finetune-out-embed --num-extra-embed-file analysis/train_freq_num_extra_embed_3v.json \
--criterion agg_softmax --preserve-out-embed \
--max-update 286000 --optimizer nag --lr 1e-3 --clip-norm 100 \
--max-tokens 3072 --update-freq 3 --tokens-per-sample 3072 --seed 1 \
--sample-break-mode none --skip-invalid-size-inputs-valid-test --ddp-backend=no_c10d --fp16 --dropout 0.1
# allocation by total training loss, approx. 3V, all output embeddings re-initialized ("init")
python train.py --task language_modeling \
data-bin/wikitext103-bpe \
--save-dir checkpoints/wikitext103-bpe-extra-embed-loss-3v-init-finetune \
--arch transformer_lm_wikibpe --restore-file checkpoints/wikitext103-bpe/checkpoint_best.pt \
--reset-optimizer --reset-dataloader --reset-meters \
--finetune-out-embed --num-extra-embed-file analysis/train_loss_num_extra_embed_3v.json \
--criterion agg_softmax \
--max-update 286000 --optimizer nag --lr 5e-2 --clip-norm 100 \
--max-tokens 3072 --update-freq 3 --tokens-per-sample 3072 --seed 1 \
--sample-break-mode none --skip-invalid-size-inputs-valid-test --ddp-backend=no_c10d --fp16
# allocation by total training loss, approx. 3V; preserve the pretrained embedding (first block), with a lower lr (1e-3) and dropout 0.1
python train.py --task language_modeling \
data-bin/wikitext103-bpe \
--save-dir checkpoints/wikitext103-bpe-extra-embed-loss-3v-preserve-finetune \
--arch transformer_lm_wikibpe --restore-file checkpoints/wikitext103-bpe/checkpoint_best.pt \
--reset-optimizer --reset-dataloader --reset-meters \
--finetune-out-embed --num-extra-embed-file analysis/train_loss_num_extra_embed_3v.json \
--criterion agg_softmax --preserve-out-embed \
--max-update 286000 --optimizer nag --lr 1e-3 --clip-norm 100 \
--max-tokens 3072 --update-freq 3 --tokens-per-sample 3072 --seed 1 \
--sample-break-mode none --skip-invalid-size-inputs-valid-test --ddp-backend=no_c10d --fp16 --dropout 0.1
## eval: score the validation set with each fine-tuned checkpoint and save per-token scores
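# freq allocation, init variant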
python eval_lm.py data-bin/wikitext103-bpe \
--path checkpoints/wikitext103-bpe-extra-embed-3v-init-finetune/checkpoint_best.pt \
--sample-break-mode complete --max-tokens 3072 \
--context-window 2560 --softmax-batch 1024 --num-extra-embed-file analysis/train_freq_num_extra_embed_3v.json \
--gen-subset valid --bpe subword_nmt --remove-bpe --save-scores extra_embed_scores/3v-init-finetune.npy
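# loss allocation, init variant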
python eval_lm.py data-bin/wikitext103-bpe \
--path checkpoints/wikitext103-bpe-extra-embed-loss-3v-init-finetune/checkpoint_best.pt \
--sample-break-mode complete --max-tokens 3072 \
--context-window 2560 --softmax-batch 1024 --num-extra-embed-file analysis/train_loss_num_extra_embed_3v.json \
--gen-subset valid --bpe subword_nmt --remove-bpe --save-scores extra_embed_scores/loss-3v-init-finetune.npy
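# freq allocation, preserve variant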
python eval_lm.py data-bin/wikitext103-bpe \
--path checkpoints/wikitext103-bpe-extra-embed-3v-preserve-finetune/checkpoint_best.pt \
--sample-break-mode complete --max-tokens 3072 \
--context-window 2560 --softmax-batch 1024 --num-extra-embed-file analysis/train_freq_num_extra_embed_3v.json \
--gen-subset valid --bpe subword_nmt --remove-bpe --save-scores extra_embed_scores/3v-preserve-finetune.npy
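# loss allocation, preserve variant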
python eval_lm.py data-bin/wikitext103-bpe \
--path checkpoints/wikitext103-bpe-extra-embed-loss-3v-preserve-finetune/checkpoint_best.pt \
--sample-break-mode complete --max-tokens 3072 \
--context-window 2560 --softmax-batch 1024 --num-extra-embed-file analysis/train_loss_num_extra_embed_3v.json \
--gen-subset valid --bpe subword_nmt --remove-bpe --save-scores extra_embed_scores/loss-3v-preserve-finetune.npy
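# A minimal sketch for inspecting the saved scores (assumptions: --save-scores
# writes a 1-D numpy array of per-token log-probabilities, and the log base is
# e; fairseq perplexities are often base 2, so adjust if the format differs):
python - <<'EOF'
import numpy as np

for name in ("3v-init-finetune", "loss-3v-init-finetune",
             "3v-preserve-finetune", "loss-3v-preserve-finetune"):
    scores = np.load(f"extra_embed_scores/{name}.npy")
    # Perplexity from the mean per-token log-probability.
    print(f"{name}: {scores.size} tokens, ppl={np.exp(-scores.mean()):.2f}")
EOF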