# ################################
# Model: wav2vec2 + DNN + CTC
# Augmentation: SpecAugment
# Authors: Sung-Lin Yeh 2021, Titouan Parcollet 2022
# ################################

# HuggingFace Hub identifier of the largest Fairseq English wav2vec2 model.
wav2vec2_hub: facebook/wav2vec2-large-960h-lv60-self

sample_rate: 16000

# Model parameters
activation: !name:torch.nn.LeakyReLU
dnn_layers: 2
dnn_neurons: 1024
freeze_wav2vec: True

# Outputs
output_neurons: 31  # Character vocabulary size; blank/bos/eos indices defined below

# Decoding parameters
blank_index: 0
bos_index: 1
eos_index: 2

enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN
    input_shape: [null, null, 1024]
    activation: !ref <activation>
    dnn_blocks: !ref <dnn_layers>
    dnn_neurons: !ref <dnn_neurons>

wav2vec2: !new:speechbrain.lobes.models.huggingface_transformers.Wav2Vec2
    source: !ref <wav2vec2_hub>
    output_norm: True
    freeze: True
    save_path: model_checkpoints

ctc_lin: !new:speechbrain.nnet.linear.Linear
    input_size: !ref <dnn_neurons>
    n_neurons: !ref <output_neurons>

log_softmax: !new:speechbrain.nnet.activations.Softmax
    apply_log: True

ctc_cost: !name:speechbrain.nnet.losses.ctc_loss
    blank_index: !ref <blank_index>

asr_model: !new:torch.nn.ModuleList
    - [!ref <enc>, !ref <ctc_lin>]

tokenizer: !new:speechbrain.dataio.encoder.CTCTextEncoder

encoder: !new:speechbrain.nnet.containers.LengthsCapableSequential
    wav2vec2: !ref <wav2vec2>
    enc: !ref <enc>
    ctc_lin: !ref <ctc_lin>

decoding_function: !name:speechbrain.decoders.ctc_greedy_decode
    blank_id: !ref <blank_index>

modules:
    encoder: !ref <encoder>

pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
    loadables:
        wav2vec2: !ref <wav2vec2>
        asr: !ref <asr_model>
        tokenizer: !ref <tokenizer>
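
# ---------------------------------------------------------------
# Usage note: a minimal sketch of loading this file for inference
# through SpeechBrain's EncoderASR pretrained interface, which uses
# the `modules`, `tokenizer`, `decoding_function`, and `pretrainer`
# entries defined above. The source/savedir paths and the audio file
# are placeholders, and the import path assumes SpeechBrain >= 1.0
# (older releases expose EncoderASR under speechbrain.pretrained).
#
#   from speechbrain.inference.ASR import EncoderASR
#
#   asr = EncoderASR.from_hparams(
#       source="path/to/dir_with_this_yaml",  # placeholder: directory holding this hyperparams file
#       savedir="pretrained_models/asr-wav2vec2",  # placeholder: local cache for fetched checkpoints
#   )
#   print(asr.transcribe_file("example.wav"))  # placeholder audio path
# ---------------------------------------------------------------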