speechbrainteam committed
Commit: 0b9dca9
Parent: 44f021d

Delete hyperparams.yaml

Files changed (1):
  hyperparams.yaml (+0, -152)
hyperparams.yaml DELETED
@@ -1,152 +0,0 @@
- # ############################################################################
- # Model: E2E ASR with Transformer
- # Encoder: Transformer Encoder
- # Decoder: Transformer Decoder + (CTC/ATT joint) beamsearch + TransformerLM
- # Tokens: unigram
- # losses: CTC + KLdiv (Label Smoothing loss)
- # Training: Librispeech 960h
- # Authors: Jianyuan Zhong, Titouan Parcollet 2021
- # ############################################################################
-
- # Feature parameters
- sample_rate: 16000
- n_fft: 512
- n_mels: 80
-
- ####################### Model parameters ###########################
- # Transformer
- d_model: 512
- nhead: 8
- num_encoder_layers: 12
- num_decoder_layers: 6
- d_ffn: 2048
- transformer_dropout: 0.1
- activation: !name:torch.nn.GELU
- output_neurons: 5000
-
- # Outputs
- blank_index: 0
- label_smoothing: 0.1
- pad_index: 0
- bos_index: 1
- eos_index: 2
-
- # Decoding parameters
- min_decode_ratio: 0.0
- max_decode_ratio: 1.0
- valid_search_interval: 10
- valid_beam_size: 10
- test_beam_size: 66
- lm_weight: 0.60
- ctc_weight_decode: 0.40
-
- ############################## models ################################
-
- CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd
-     input_shape: (8, 10, 80)
-     num_blocks: 2
-     num_layers_per_block: 1
-     out_channels: (64, 32)
-     kernel_sizes: (3, 3)
-     strides: (2, 2)
-     residuals: (False, False)
-
- Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length
-     input_size: 640
-     tgt_vocab: !ref <output_neurons>
-     d_model: !ref <d_model>
-     nhead: !ref <nhead>
-     num_encoder_layers: !ref <num_encoder_layers>
-     num_decoder_layers: !ref <num_decoder_layers>
-     d_ffn: !ref <d_ffn>
-     dropout: !ref <transformer_dropout>
-     activation: !ref <activation>
-     encoder_module: conformer
-     attention_type: RelPosMHAXL
-     normalize_before: True
-     causal: False
-
- ctc_lin: !new:speechbrain.nnet.linear.Linear
-     input_size: !ref <d_model>
-     n_neurons: !ref <output_neurons>
-
- seq_lin: !new:speechbrain.nnet.linear.Linear
-     input_size: !ref <d_model>
-     n_neurons: !ref <output_neurons>
-
- decoder: !new:speechbrain.decoders.S2STransformerBeamSearch
-     modules: [!ref <Transformer>, !ref <seq_lin>, !ref <ctc_lin>]
-     bos_index: !ref <bos_index>
-     eos_index: !ref <eos_index>
-     blank_index: !ref <blank_index>
-     min_decode_ratio: !ref <min_decode_ratio>
-     max_decode_ratio: !ref <max_decode_ratio>
-     beam_size: !ref <test_beam_size>
-     ctc_weight: !ref <ctc_weight_decode>
-     lm_weight: !ref <lm_weight>
-     lm_modules: !ref <lm_model>
-     temperature: 1.15
-     temperature_lm: 1.15
-     using_eos_threshold: False
-     length_normalization: True
-
- log_softmax: !new:torch.nn.LogSoftmax
-     dim: -1
-
- normalizer: !new:speechbrain.processing.features.InputNormalization
-     norm_type: global
-
- compute_features: !new:speechbrain.lobes.features.Fbank
-     sample_rate: !ref <sample_rate>
-     n_fft: !ref <n_fft>
-     n_mels: !ref <n_mels>
-
- # This is the Transformer LM that is used according to the Huggingface repository
- # Visit the HuggingFace model corresponding to the pretrained_lm_tokenizer_path
- # For more details about the model!
- # NB: It has to match the pre-trained TransformerLM!!
- lm_model: !new:speechbrain.lobes.models.transformer.TransformerLM.TransformerLM
-     vocab: 5000
-     d_model: 768
-     nhead: 12
-     num_encoder_layers: 12
-     num_decoder_layers: 0
-     d_ffn: 3072
-     dropout: 0.0
-     activation: !name:torch.nn.GELU
-     normalize_before: False
-
- tokenizer: !new:sentencepiece.SentencePieceProcessor
-
- Tencoder: !new:speechbrain.lobes.models.transformer.TransformerASR.EncoderWrapper
-     transformer: !ref <Transformer>
-
- encoder: !new:speechbrain.nnet.containers.LengthsCapableSequential
-     input_shape: [null, null, !ref <n_mels>]
-     compute_features: !ref <compute_features>
-     normalize: !ref <normalizer>
-     cnn: !ref <CNN>
-     transformer_encoder: !ref <Tencoder>
-
- # Models
- asr_model: !new:torch.nn.ModuleList
-     - [!ref <CNN>, !ref <Transformer>, !ref <seq_lin>, !ref <ctc_lin>]
-
- modules:
-     compute_features: !ref <compute_features>
-     normalizer: !ref <normalizer>
-     pre_transformer: !ref <CNN>
-     transformer: !ref <Transformer>
-     asr_model: !ref <asr_model>
-     lm_model: !ref <lm_model>
-     encoder: !ref <encoder>
-     decoder: !ref <decoder>
-
- # The pretrainer allows a mapping between pretrained files and instances that
- # are declared in the yaml.
- pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
-     loadables:
-         normalizer: !ref <normalizer>
-         asr: !ref <asr_model>
-         lm: !ref <lm_model>
-         tokenizer: !ref <tokenizer>
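For context: a SpeechBrain hyperparams.yaml like the one deleted above is normally consumed with HyperPyYAML, and its pretrainer block maps pretrained checkpoint files onto the instances declared in the YAML. The sketch below is a minimal illustration under those assumptions; the local file path and the Hub repo id are placeholders for this commit's repository, not details taken from the diff.

    # Minimal loading sketch (assumes a local copy of the deleted YAML and a
    # placeholder Hub repo id for the pretrained checkpoints).
    from hyperpyyaml import load_hyperpyyaml

    with open("hyperparams.yaml") as f:  # placeholder local path
        hparams = load_hyperpyyaml(f)

    # The pretrainer's loadables (normalizer, asr, lm, tokenizer) are fetched
    # from the source repo and loaded into the matching YAML-declared instances.
    hparams["pretrainer"].collect_files(default_source="speechbrain/<repo-id>")  # placeholder repo id
    hparams["pretrainer"].load_collected()

    encoder = hparams["encoder"]  # Fbank -> InputNormalization -> CNN -> Transformer encoder
    decoder = hparams["decoder"]  # joint CTC/attention beam search rescored by the TransformerLM

In practice, Hub model cards built on this kind of YAML expose the same pipeline through speechbrain.pretrained.EncoderDecoderASR.from_hparams, which downloads hyperparams.yaml, runs the pretrainer, and wires the encoder and decoder together for transcribe_file-style inference.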