sanchit-gandhi committed
Commit e456ff1 · Parent(s): e199278

Commit message: up

Files changed:
- create_model.py +56 -0
- run_flax_speech_recognition_seq2seq.py +1 -0
- run_librispeech.sh +31 -0
create_model.py
ADDED
@@ -0,0 +1,56 @@
import numpy as np
from transformers import AutoFeatureExtractor, AutoTokenizer, FlaxSpeechEncoderDecoderModel, GPT2Tokenizer

encoder_id = "facebook/wav2vec2-large-lv60"
decoder_id = "gpt2-medium"

model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(encoder_id, decoder_id, encoder_add_adapter=True)

# set all encoder regularisation to zero
model.config.encoder.feat_proj_dropout = 0.0
model.config.encoder.final_dropout = 0.0
model.config.encoder.activation_dropout = 0.0
model.config.encoder.apply_spec_augment = False
model.config.encoder.attention_dropout = 0.0
model.config.encoder.feat_extract_dropout = 0.0
model.config.encoder.feat_proj_dropout = 0.0
model.config.encoder.hidden_dropout = 0.0
model.config.encoder.hidden_dropout_prob = 0.0
model.config.encoder.layerdrop = 0.0
model.config.encoder.mask_feature_prob = 0.0
model.config.encoder.mask_time_prob = 0.0

# set all decoder regularisation to zero
model.config.decoder.attn_pdrop = 0.0
model.config.decoder.embd_pdrop = 0.0
model.config.decoder.resid_pdrop = 0.0
model.config.decoder.summary_first_dropout = 0.0

# force GPT2 to append EOS to begin and end of seq
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
    outputs = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
    return outputs

GPT2Tokenizer.build_inputs_with_special_tokens = build_inputs_with_special_tokens
gpt2_tokenizer = GPT2Tokenizer.from_pretrained(decoder_id)
# set pad_token_id to unk_token_id, note: unk_token_id == eos_token_id == bos_token_id
gpt2_tokenizer.pad_token = gpt2_tokenizer.unk_token
gpt2_tokenizer.save_pretrained("./")

model.config.pad_token_id = gpt2_tokenizer.pad_token_id
model.config.decoder_start_token_id = model.config.decoder.bos_token_id
model.config.eos_token_id = model.config.decoder.eos_token_id
model.config.max_length = 50
model.config.num_beams = 1

model.config.use_cache = False
model.config.decoder.use_cache = False
model.config.processor_class = "Wav2Vec2Processor"

# check if generation works
out = model.generate(np.ones((1, 2000)))

model.save_pretrained("./")

feature_extractor = AutoFeatureExtractor.from_pretrained(encoder_id)
feature_extractor.save_pretrained("./")
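Note: create_model.py saves the warm-started encoder-decoder, the patched GPT2 tokenizer and the Wav2Vec2 feature extractor into the repository root. A minimal sketch of loading those artifacts back for a quick smoke test could look as follows; the silent dummy waveform and the use of "./" as the checkpoint directory are illustrative assumptions, and since the combined model has not been fine-tuned yet, the decoded text is not expected to be meaningful.

# sketch only, not part of the commit: reload the artifacts written by create_model.py
import numpy as np
from transformers import AutoFeatureExtractor, FlaxSpeechEncoderDecoderModel, GPT2Tokenizer

model = FlaxSpeechEncoderDecoderModel.from_pretrained("./")
feature_extractor = AutoFeatureExtractor.from_pretrained("./")
tokenizer = GPT2Tokenizer.from_pretrained("./")

# one second of silence at 16 kHz stands in for real speech (assumption for illustration)
dummy_speech = np.zeros(16000)
inputs = feature_extractor(dummy_speech, sampling_rate=16000, return_tensors="np")

# greedy decoding; max_length=50 and num_beams=1 come from the saved config
generated = model.generate(inputs.input_values)
transcription = tokenizer.batch_decode(generated.sequences, skip_special_tokens=True)
print(transcription)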
run_flax_speech_recognition_seq2seq.py
ADDED
@@ -0,0 +1 @@
/home/sanchitgandhi/transformers/examples/flax/speech-recognition/run_flax_speech_recognition_seq2seq.py
run_librispeech.sh
ADDED
@@ -0,0 +1,31 @@
#!/usr/bin/env bash
python run_flax_speech_recognition_seq2seq.py \
    --dataset_name="librispeech_asr" \
    --model_name_or_path="./" \
    --dataset_config_name="clean" \
    --train_split_name="train.100" \
    --eval_split_name="validation" \
    --output_dir="./" \
    --preprocessing_num_workers="16" \
    --length_column_name="input_length" \
    --overwrite_output_dir \
    --num_train_epochs="1" \
    --per_device_train_batch_size="4" \
    --per_device_eval_batch_size="4" \
    --logging_steps="25" \
    --max_duration_in_seconds="15" \
    --max_target_length="64" \
    --generation_max_length="40" \
    --generation_num_beams="1" \
    --learning_rate="3e-4" \
    --warmup_steps="100" \
    --text_column_name="text" \
    --save_total_limit="1" \
    --freeze_feature_encoder \
    --predict_with_generate \
    --do_lower_case \
    --do_eval \
    --do_train \
    --push_to_hub \
    --use_auth_token
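For orientation, a rough back-of-the-envelope for the run configured above, under stated assumptions rather than figures taken from the commit: assuming 8 accelerator devices (e.g. a TPU v3-8) and roughly 28.5k utterances in the LibriSpeech train.100 split, the flags imply the step counts sketched below.

# sketch only: step count implied by run_librispeech.sh
# assumptions (not stated in the commit): 8 devices, ~28,539 train.100 utterances
num_devices = 8
per_device_train_batch_size = 4   # --per_device_train_batch_size="4"
num_train_epochs = 1              # --num_train_epochs="1"
num_train_examples = 28_539

global_batch_size = num_devices * per_device_train_batch_size   # 32
steps_per_epoch = num_train_examples // global_batch_size       # ~891
total_train_steps = steps_per_epoch * num_train_epochs

print(f"global batch size: {global_batch_size}")
print(f"total optimisation steps: {total_train_steps} (of which warmup_steps=100)")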