export HF_TOKEN=`cat /home/huggingface.token` | |
export HF_HOME="/home/Work/common_huggingface" | |
## IMPORTANT: This script was stopped after 1.5 epochs (2400 steps) | |
## because the training loss was exploding => the best checkpoint (2000 steps) | |
## was then taken. | |
## MAKE SURE TO DO HYPER-PARAMETER TUNING TO GET BETTER RESULTS | |
python run_speech_recognition_ctc.py \ | |
--token="${HF_TOKEN}" \ | |
--dataset_name="edinburghcstr/ami" \ | |
--model_name_or_path="facebook/wav2vec2-large-lv60" \ | |
--dataset_config_name="ihm" \ | |
--train_split_name="train" \ | |
--eval_split_name="validation" \ | |
--output_dir="./" \ | |
--preprocessing_num_workers="16" \ | |
--overwrite_output_dir \ | |
--num_train_epochs="2" \ | |
--per_device_train_batch_size="16" \ | |
--per_device_eval_batch_size="16" \ | |
--gradient_accumulation_steps="1" \ | |
--learning_rate="3e-4" \ | |
--warmup_steps="500" \ | |
--evaluation_strategy="steps" \ | |
--text_column_name="text" \ | |
--min_duration_in_seconds="0.25" \ | |
--save_steps="400" \ | |
--eval_steps="1000" \ | |
--logging_steps="1" \ | |
--layerdrop="0.0" \ | |
--save_total_limit="3" \ | |
--freeze_feature_encoder \ | |
--gradient_checkpointing \ | |
--chars_to_ignore , ? . ! - \; \: \" β % β β \ | |
--fp16 \ | |
--group_by_length \ | |
--push_to_hub \ | |
--do_eval \ | |
--do_train --do_eval | |