#!/bin/bash
# japanese-roberta-base-20201221 / fairseq_train.sh
# Uploaded by Kouta Nakayama
TOTAL_UPDATES=125000 # Total number of training steps
WARMUP_UPDATES=10000 # Warmup the learning rate over this many updates
PEAK_LR=0.00005 # Peak learning rate, adjust as needed
TOKENS_PER_SAMPLE=512 # Max sequence length
MAX_POSITIONS=512 # Num. positional embeddings (usually same as above)
MAX_SENTENCES=32 # Number of sequences per batch (batch size); originally 16
UPDATE_FREQ=2    # Accumulate gradients over 2 batches, doubling the effective batch size; originally 16
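# The effective batch size scales with gradient accumulation and GPU count:
#   sequences per update = MAX_SENTENCES * UPDATE_FREQ * num_GPUs
#   e.g. on a single GPU: 32 * 2 = 64 sequences, i.e. up to 64 * 512 = 32,768 tokens per update.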
DATA_DIR=data/wiki201221_janome_vocab_32000/data-bin
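# The data-bin directory above is assumed to have been built with fairseq-preprocess from
# Janome-tokenized text and a 32k-entry dictionary. A minimal sketch of that step follows;
# the input file names are illustrative, not taken from this repo:
#
#   fairseq-preprocess \
#       --only-source \
#       --srcdict data/wiki201221_janome_vocab_32000/dict.txt \
#       --trainpref data/wiki201221_janome_vocab_32000/train.txt \
#       --validpref data/wiki201221_janome_vocab_32000/valid.txt \
#       --destdir $DATA_DIR \
#       --workers 16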
fairseq-train --fp16 $DATA_DIR \
--task masked_lm --criterion masked_lm \
--arch roberta_base --sample-break-mode complete --tokens-per-sample $TOKENS_PER_SAMPLE \
--optimizer adam --adam-betas '(0.9, 0.98)' --adam-eps 1e-6 --clip-norm 0.0 \
--lr-scheduler polynomial_decay --lr $PEAK_LR --warmup-updates $WARMUP_UPDATES --total-num-update $TOTAL_UPDATES \
--dropout 0.1 --attention-dropout 0.1 --weight-decay 0.01 \
--batch-size $MAX_SENTENCES --update-freq $UPDATE_FREQ \
--save-dir model/roberta_base_wiki201221_janome_vocab_32000 --save-interval 10 \
--max-update $TOTAL_UPDATES --log-format simple --log-interval 1 \
--skip-invalid-size-inputs-valid-test
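# If training is interrupted, fairseq-train resumes automatically from checkpoint_last.pt in
# --save-dir when it exists. To restore from a specific checkpoint explicitly, add a flag like
# the following (the path shown is illustrative):
#
#   --restore-file model/roberta_base_wiki201221_janome_vocab_32000/checkpoint_last.pt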