Kouta Nakayama committed
Commit · 529ca10
Parent(s): 99417e1
Upload fairseq_train.sh
Files changed: fairseq_train.sh +20 -0
fairseq_train.sh
ADDED
@@ -0,0 +1,20 @@
+TOTAL_UPDATES=125000    # Total number of training steps
+WARMUP_UPDATES=10000    # Warm up the learning rate over this many updates
+PEAK_LR=0.00005         # Peak learning rate; adjust as needed
+TOKENS_PER_SAMPLE=512   # Max sequence length
+MAX_POSITIONS=512       # Number of positional embeddings (usually the same as above)
+MAX_SENTENCES=32        # Number of sequences per batch (batch size); originally 16
+UPDATE_FREQ=2           # Gradient accumulation: scales the effective batch size 2x; originally 16
+
+DATA_DIR=data/wiki201221_janome_vocab_32000/data-bin
+
+fairseq-train --fp16 $DATA_DIR \
+    --task masked_lm --criterion masked_lm \
+    --arch roberta_base --sample-break-mode complete --tokens-per-sample $TOKENS_PER_SAMPLE \
+    --optimizer adam --adam-betas '(0.9, 0.98)' --adam-eps 1e-6 --clip-norm 0.0 \
+    --lr-scheduler polynomial_decay --lr $PEAK_LR --warmup-updates $WARMUP_UPDATES --total-num-update $TOTAL_UPDATES \
+    --dropout 0.1 --attention-dropout 0.1 --weight-decay 0.01 \
+    --batch-size $MAX_SENTENCES --update-freq $UPDATE_FREQ \
+    --save-dir model/roberta_base_wiki201221_janome_vocab_32000 --save-interval 10 \
+    --max-update $TOTAL_UPDATES --log-format simple --log-interval 1 \
+    --skip-invalid-size-inputs-valid-test
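
Note on the batch-size arithmetic: the effective batch size per optimizer step is the per-GPU batch size times the gradient-accumulation factor times the number of GPUs, so on a single GPU these settings give 32 × 2 = 64 sequences per update (with the original values of 16 × 16, it would be 256). A minimal sketch of that calculation, assuming a single-GPU run; NUM_GPUS is a hypothetical variable introduced here for illustration:

NUM_GPUS=1   # hypothetical: set to however many GPUs fairseq-train actually uses
# Effective batch size = per-GPU batch size * gradient-accumulation factor * GPU count
EFFECTIVE_BATCH=$((MAX_SENTENCES * UPDATE_FREQ * NUM_GPUS))
echo "Effective batch size: $EFFECTIVE_BATCH sequences per update"   # 32 * 2 * 1 = 64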