# config_token.yaml
# Model Configuration
model_name: microsoft/deberta-v3-small # Hugging Face model
name: "token level gliner"
max_width: 100 # maximum entity span width, in tokens
hidden_size: 768 # hidden size of the span/entity representations
dropout: 0.1 # dropout rate
fine_tune: true # fine-tune the backbone encoder
subtoken_pooling: first # represent each word by its first subword piece
span_mode: token_level # token-level scoring instead of span enumeration
# Training Parameters
num_steps: 30000 # total number of training steps
train_batch_size: 8
eval_every: 5000 # run evaluation every N training steps
warmup_ratio: 0.1 # fraction of steps used for learning-rate warmup
scheduler_type: "cosine" # learning-rate scheduler
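# Note: with warmup_ratio: 0.1 and num_steps: 30000, the learning rate warms up for
# roughly 0.1 * 30000 = 3000 optimizer steps before the cosine decay takes over.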
# Loss Function
loss_alpha: -1 # focal loss alpha; -1 disables the alpha weighting
loss_gamma: 0 # focal loss gamma; 0 disables the focal modulation
label_smoothing: 0 # label smoothing factor (0 = none)
loss_reduction: "sum" # reduction used when aggregating the loss
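# Illustrative note: with alpha in (0, 1) and gamma > 0, a focal loss down-weights easy
# examples, roughly FL(p) = -alpha * (1 - p)^gamma * log(p); the defaults above
# (alpha = -1, gamma = 0) fall back to plain cross-entropy.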
# Learning Rate and Weight Decay Configuration
lr_encoder: 1e-5
lr_others: 5e-5
weight_decay_encoder: 0.01
weight_decay_other: 0.01
max_grad_norm: 1.0
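# Note: these settings are typically realized as two optimizer parameter groups
# (e.g. AdamW), one for the backbone encoder (lr_encoder / weight_decay_encoder) and
# one for the remaining layers (lr_others / weight_decay_other), with gradients
# clipped to max_grad_norm before each update.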
# Directory Paths
root_dir: gliner_logs
train_data: "train.json" # see https://github.com/urchade/GLiNER/tree/main/data
val_data_dir: "NER_datasets"
# "NER_datasets": val data from the paper can be obtained from "https://drive.google.com/file/d/1T-5IbocGka35I7X3CE6yKe5N_Xg2lVKT/view"
# Pretrained Model Path
# Use "none" if no pretrained model is being used
prev_path: "none"
save_total_limit: 10 # maximum number of checkpoints to keep
# Advanced Training Settings
size_sup: -1 # number of labelled training samples to use (-1 = use all)
max_types: 25 # maximum number of entity types sampled per example
shuffle_types: true # shuffle the order of entity types during training
random_drop: true # randomly drop entity types during training
max_neg_type_ratio: 1 # ratio of negative (absent) entity types sampled per positive one
max_len: 384 # maximum input sequence length
freeze_token_rep: false # if true, freeze the encoder's token representations
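# Note: this file is meant to be passed as the config of the repository's training
# script (see the upstream GLiNER README for the exact entry point), which reads these
# keys to build the model, optimizer, scheduler, and data loaders.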