sbatch_run_jointlk__medqa_usmle.sh
#!/usr/bin/env bash
#SBATCH -J medqa_jointlk_train
#SBATCH -o log.medqa_jointlk_train.txt       # redirect stdout to log.medqa_jointlk_train.txt
#SBATCH -e slurm-%j.err                      # redirect stderr to slurm-%j.err; %j is replaced with the job ID
#SBATCH -p compute                           # submit to the "compute" partition
#SBATCH -N 1                                 # request 1 node
#SBATCH --ntasks-per-node=1                  # launch 1 task per node
#SBATCH --cpus-per-task=4                    # use 4 CPU cores per task
#SBATCH --mem=2GB
#SBATCH -t 1-00:00:00                        # maximum wall-clock time of 1 day
#SBATCH --gres=gpu:tesla_v100-sxm2-16gb:1    # request 1 tesla_v100-sxm2-16gb GPU per node
# SBATCH -w gpu25                            # (disabled) pin the job to node gpu25
source ~/.bashrc
dt=`date '+%Y%m%d_%H%M%S'`
dataset="medqa_usmle"
model='cambridgeltl/SapBERT-from-PubMedBERT-fulltext'
shift
shift
args=$@  # drop the first two positional arguments; forward any remaining ones to jointlk.py
elr="5e-5"
dlr="1e-3"
bs=128
mbs=2
sl=512
n_epochs=15
ent_emb='ddb'
num_relation=34
k=5  # number of GNN layers
gnndim=200
unfrz=0
echo "***** hyperparameters *****"
echo "dataset: $dataset"
echo "enc_name: $model"
echo "batch_size: $bs"
echo "learning_rate: elr $elr dlr $dlr"
echo "gnn: dim $gnndim layer $k"
echo "******************************"
save_dir_pref='saved_models'
mkdir -p $save_dir_pref
mkdir -p logs
###### Training ######
for seed in 0; do
python3 -u jointlk.py --dataset $dataset \
--encoder $model -k $k --gnn_dim $gnndim -elr $elr -dlr $dlr -bs $bs -mbs $mbs -sl $sl --seed $seed \
--num_relation $num_relation \
--n_epochs $n_epochs --max_epochs_before_stop 10 --unfreeze_epoch $unfrz \
--train_adj data/${dataset}/graph/train.graph.adj.pk \
--dev_adj data/${dataset}/graph/dev.graph.adj.pk \
--test_adj data/${dataset}/graph/test.graph.adj.pk \
--train_statements data/${dataset}/statement/train.statement.jsonl \
--dev_statements data/${dataset}/statement/dev.statement.jsonl \
--test_statements data/${dataset}/statement/test.statement.jsonl \
--ent_emb ${ent_emb} \
--save_model \
--save_dir ${save_dir_pref}/${dataset}/enc-sapbert__k${k}__gnndim${gnndim}__bs${bs}__seed${seed}__${dt} $args \
> logs/train_${dataset}__enc-sapbert__k${k}__gnndim${gnndim}__bs${bs}__sl${sl}__unfrz${unfrz}__seed${seed}__${dt}.log.txt
done
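
# ------------------------------------------------------------------
# Usage sketch (an assumption, not part of the original script): on a
# standard Slurm cluster the job would typically be submitted and
# monitored as follows; adjust the partition and GPU names in the
# header above to match your cluster.
#   sbatch sbatch_run_jointlk__medqa_usmle.sh                         # submit the job
#   squeue -u $USER                                                   # check queue status
#   tail -f logs/train_medqa_usmle__enc-sapbert__*.log.txt            # follow training output
# ------------------------------------------------------------------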