#!/bin/bash

export LC_ALL=C.UTF-8
export LANG=C.UTF-8

# Data and model paths
export OUTPUT_DIR=/home/m3hrdadfi/code/wav2vec2-base-persian
export CACHE_DIR=/home/m3hrdadfi/data_cache/
export MODEL_NAME_OR_PATH=/home/m3hrdadfi/code/wav2vec2-base-persian
export TRAIN_FILE=/home/m3hrdadfi/data/fa/train_with_aug.csv
export VALIDATION_FILE=/home/m3hrdadfi/data/fa/test.csv
export SPEECH_FILE_COLUMN=path
#export MAX_EVAL_SAMPLES=5000

# Training hyperparameters
export PER_DEVICE_TRAIN_BATCH_SIZE=8
export PER_DEVICE_EVAL_BATCH_SIZE=8
#export GRADIENT_ACCUMULATION_STEPS=2
export NUM_TRAIN_EPOCHS=5.0
export LEARNING_RATE=5e-4
export WARMUP_STEPS=1000
export LOGGING_STEPS=500
#export EVAL_STEPS=2500
#export SAVE_STEPS=2500

# Preprocessing and optimizer settings
export PREPROCESSING_NUM_WORKERS=4
export MAX_DURATION_IN_SECONDS=20.0
export ADAM_BETA_1=0.9
export ADAM_BETA_2=0.98
export WEIGHT_DECAY=0.01
export D_TYPE=bfloat16
export PAD_TO_MULTIPLE_OF=16384

# Launch Flax wav2vec2 pretraining
python src/run_wav2vec2_pretrain_flax.py \
    --output_dir="$OUTPUT_DIR" \
    --cache_dir="$CACHE_DIR" \
    --train_file="$TRAIN_FILE" \
    --validation_file="$VALIDATION_FILE" \
    --speech_file_column="$SPEECH_FILE_COLUMN" \
    --model_name_or_path="$MODEL_NAME_OR_PATH" \
    --per_device_train_batch_size=$PER_DEVICE_TRAIN_BATCH_SIZE \
    --per_device_eval_batch_size=$PER_DEVICE_EVAL_BATCH_SIZE \
    --preprocessing_num_workers=$PREPROCESSING_NUM_WORKERS \
    --max_duration_in_seconds=$MAX_DURATION_IN_SECONDS \
    --num_train_epochs=$NUM_TRAIN_EPOCHS \
    --learning_rate=$LEARNING_RATE \
    --warmup_steps=$WARMUP_STEPS \
    --weight_decay=$WEIGHT_DECAY \
    --adam_beta1=$ADAM_BETA_1 \
    --adam_beta2=$ADAM_BETA_2 \
    --dtype="$D_TYPE" \
    --pad_to_multiple_of=$PAD_TO_MULTIPLE_OF \
    --logging_steps=$LOGGING_STEPS \
    --push_to_hub
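
# Optional pre-flight check (a minimal sketch, assuming TRAIN_FILE and
# VALIDATION_FILE are plain CSV files whose header row contains the column
# named by SPEECH_FILE_COLUMN above). Uncomment and run it before the
# training command to fail fast on missing data files and to eyeball the
# first data row.
#
# for f in "$TRAIN_FILE" "$VALIDATION_FILE"; do
#   [ -f "$f" ] || { echo "missing data file: $f" >&2; exit 1; }
#   head -n 2 "$f"   # print the CSV header and the first row
# done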