# (removed: file-size/blame/line-number residue left over from web extraction — not part of the script)
#!/bin/bash
# Pretraining launcher for wav2vec2-base-persian (Flax).
# Exports every hyperparameter consumed by the `python` invocation below.
set -euo pipefail

# Byte-deterministic UTF-8 locale for any text tooling invoked downstream.
export LC_ALL=C.UTF-8
export LANG=C.UTF-8

# Paths: checkpoint output dir, dataset cache dir, model, and train/eval CSVs.
export OUTPUT_DIR=/home/m3hrdadfi/code/wav2vec2-base-persian
# BUG FIX: this line originally re-exported OUTPUT_DIR, silently clobbering the
# value above and leaving CACHE_DIR (used by --cache_dir below) unset.
export CACHE_DIR=/home/m3hrdadfi/data_cache/
export MODEL_NAME_OR_PATH=/home/m3hrdadfi/code/wav2vec2-base-persian
export TRAIN_FILE=/home/m3hrdadfi/data/fa/train_with_aug.csv
export VALIDATION_FILE=/home/m3hrdadfi/data/fa/test.csv
export SPEECH_FILE_COLUMN=path        # CSV column that holds the audio file path
#export MAX_EVAL_SAMPLES=5000

# Batch sizes and training schedule.
export PER_DEVICE_TRAIN_BATCH_SIZE=8
export PER_DEVICE_EVAL_BATCH_SIZE=8
#export GRADIENT_ACCUMULATION_STEPS=2
export NUM_TRAIN_EPOCHS=5.0
export LEARNING_RATE=5e-4
export WARMUP_STEPS=1000
export LOGGING_STEPS=500
#export EVAL_STEPS=2500
#export SAVE_STEPS=2500

# Preprocessing and optimizer settings.
export PREPROCESSING_NUM_WORKERS=4
export MAX_DURATION_IN_SECONDS=20.0   # presumably longer clips are filtered by the trainer — TODO confirm
export ADAM_BETA_1=0.9
export ADAM_BETA_2=0.98
export WEIGHT_DECAY=0.01
export D_TYPE=bfloat16                # computation dtype passed via --dtype
export PAD_TO_MULTIPLE_OF=16384       # pad audio batches to a multiple of this many samples
# Launch Flax wav2vec2 pretraining with the hyperparameters exported above.
# BUG FIX: the command originally ended with a stray " |" (extraction residue),
# which pipes into end-of-file and is a bash syntax error. All expansions are
# now double-quoted so empty/whitespace values cannot word-split (SC2086).
python src/run_wav2vec2_pretrain_flax.py \
  --output_dir="$OUTPUT_DIR" \
  --cache_dir="$CACHE_DIR" \
  --train_file="$TRAIN_FILE" \
  --validation_file="$VALIDATION_FILE" \
  --speech_file_column="$SPEECH_FILE_COLUMN" \
  --model_name_or_path="$MODEL_NAME_OR_PATH" \
  --per_device_train_batch_size="$PER_DEVICE_TRAIN_BATCH_SIZE" \
  --per_device_eval_batch_size="$PER_DEVICE_EVAL_BATCH_SIZE" \
  --preprocessing_num_workers="$PREPROCESSING_NUM_WORKERS" \
  --max_duration_in_seconds="$MAX_DURATION_IN_SECONDS" \
  --num_train_epochs="$NUM_TRAIN_EPOCHS" \
  --learning_rate="$LEARNING_RATE" \
  --warmup_steps="$WARMUP_STEPS" \
  --weight_decay="$WEIGHT_DECAY" \
  --adam_beta1="$ADAM_BETA_1" \
  --adam_beta2="$ADAM_BETA_2" \
  --dtype="$D_TYPE" \
  --pad_to_multiple_of="$PAD_TO_MULTIPLE_OF" \
  --logging_steps="$LOGGING_STEPS" \
  --push_to_hub