Titouan committed
Commit 3834b96 · Parent(s): a7d9c24

first commit

Files changed:
- README.md +77 -0
- asr.ckpt +3 -0
- config.json +68 -0
- hyperparams.yaml +119 -0
- preprocessor_config.json +8 -0
- tokenizer.ckpt +0 -0
- wav2vec2.ckpt +3 -0
README.md
ADDED
@@ -0,0 +1,77 @@
---
language: "fr"
thumbnail:
tags:
- ASR
- CTC
- Attention
- pytorch
- speechbrain
- Transformer
license: "apache-2.0"
datasets:
- commonvoice
metrics:
- wer
- cer
---

# wav2vec 2.0 with CTC/Attention trained on CommonVoice French (No LM)

This repository provides all the necessary tools to perform automatic speech
recognition from an end-to-end system pretrained on CommonVoice (French) within
SpeechBrain. For a better experience, we encourage you to learn more about
[SpeechBrain](https://speechbrain.github.io). The performance of the given ASR model is:

| Release | Test CER | Test WER | GPUs |
|:-------:|:--------:|:--------:|:-----------:|
| 29-04-21 | 6.54 | 13.90 | 2xV100 32GB |

## Pipeline description

This ASR system is composed of 2 different but linked blocks:
1. Tokenizer (unigram) that transforms words into subword units, trained on
the train transcriptions (train.tsv) of CommonVoice (FR).
2. Acoustic model (wav2vec2.0 + CTC/Attention). A pretrained wav2vec 2.0 model ([wav2vec2-large-xlsr-53-french](https://huggingface.co/facebook/wav2vec2-large-xlsr-53-french)) is combined with two DNN layers and finetuned on CommonVoice FR.
The obtained final acoustic representation is given to the CTC and attention decoders.
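
Since the tokenizer is a standard SentencePiece unigram model (serialized as `tokenizer.ckpt` and instantiated as `sentencepiece.SentencePieceProcessor` in hyperparams.yaml below), it can be inspected on its own. A minimal sketch, assuming the checkpoint has already been downloaded into the `savedir` used later in this card:

```python
# Minimal sketch: inspect the unigram tokenizer directly.
# Assumes tokenizer.ckpt was fetched into the savedir below.
import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.load("pretrained_models/asr-crdnn-commonvoice-fr/tokenizer.ckpt")
print(sp.encode_as_pieces("bonjour tout le monde"))
```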

## Intended uses & limitations

This model has been primarily developed to be run within SpeechBrain as a pretrained ASR model
for the French language. Thanks to the flexibility of SpeechBrain, any of the 2 blocks
detailed above can be extracted and connected to your custom pipeline as long as SpeechBrain is
installed, as sketched below.
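
As an illustration, both blocks can be reached from the pretrained interface once it is instantiated. This is a hedged sketch: the `mods` and `tokenizer` attributes are assumptions about SpeechBrain's `EncoderDecoderASR` wrapper, not something this card documents:

```python
from speechbrain.pretrained import EncoderDecoderASR

asr_model = EncoderDecoderASR.from_hparams(
    source="speechbrain/asr-crdnn-commonvoice-fr",
    savedir="pretrained_models/asr-crdnn-commonvoice-fr",
)

# The two blocks described above, ready for a custom pipeline
# (attribute names assumed from the SpeechBrain pretrained interface):
encoder = asr_model.mods["encoder"]  # wav2vec 2.0 + DNN layers
tokenizer = asr_model.tokenizer      # SentencePiece unigram tokenizer
```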

## Install SpeechBrain

First of all, please install transformers and SpeechBrain with the following command:

```
pip install speechbrain transformers
```

Please note that we encourage you to read our tutorials and learn more about
[SpeechBrain](https://speechbrain.github.io).

### Transcribing your own audio files (in French)

```python
from speechbrain.pretrained import EncoderDecoderASR

asr_model = EncoderDecoderASR.from_hparams(source="speechbrain/asr-crdnn-commonvoice-fr", savedir="pretrained_models/asr-crdnn-commonvoice-fr")
asr_model.transcribe_file("example-fr.wav")
```
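
For several files, batched decoding may be preferable to `transcribe_file`. A minimal sketch, assuming the `load_audio` and `transcribe_batch` helpers of the same pretrained interface:

```python
import torch
from speechbrain.pretrained import EncoderDecoderASR

asr_model = EncoderDecoderASR.from_hparams(
    source="speechbrain/asr-crdnn-commonvoice-fr",
    savedir="pretrained_models/asr-crdnn-commonvoice-fr",
)

# load_audio resamples the file to the 16 kHz mono input the model expects
sig = asr_model.load_audio("example-fr.wav")
# transcribe_batch takes a padded batch plus relative lengths in [0, 1]
words, tokens = asr_model.transcribe_batch(sig.unsqueeze(0), torch.tensor([1.0]))
print(words)
```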

#### Referencing SpeechBrain

```
@misc{SB2021,
    author = {Ravanelli, Mirco and Parcollet, Titouan and Rouhe, Aku and Plantinga, Peter and Rastorgueva, Elena and Lugosch, Loren and Dawalatabad, Nauman and Ju-Chieh, Chou and Heba, Abdel and Grondin, Francois and Aris, William and Liao, Chien-Feng and Cornell, Samuele and Yeh, Sung-Lin and Na, Hwidong and Gao, Yan and Fu, Szu-Wei and Subakan, Cem and De Mori, Renato and Bengio, Yoshua},
    title = {SpeechBrain},
    year = {2021},
    publisher = {GitHub},
    journal = {GitHub repository},
    howpublished = {\url{https://github.com/speechbrain/speechbrain}},
}
```
asr.ckpt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ee40bc648d23dccd4d6d8cf77eb317aede679218ad192c96ad631921e7561024
size 60570064
config.json
ADDED
@@ -0,0 +1,68 @@
{
    "activation_dropout": 0.1,
    "apply_spec_augment": true,
    "architectures": ["Wav2Vec2ForCTC"],
    "attention_dropout": 0.1,
    "bos_token_id": 1,
    "conv_bias": true,
    "conv_dim": [512, 512, 512, 512, 512, 512, 512],
    "conv_kernel": [10, 3, 3, 3, 3, 2, 2],
    "conv_stride": [5, 2, 2, 2, 2, 2, 2],
    "ctc_loss_reduction": "sum",
    "ctc_zero_infinity": false,
    "do_stable_layer_norm": true,
    "eos_token_id": 2,
    "feat_extract_activation": "gelu",
    "feat_extract_dropout": 0.0,
    "feat_extract_norm": "layer",
    "feat_proj_dropout": 0.1,
    "final_dropout": 0.1,
    "gradient_checkpointing": false,
    "hidden_act": "gelu",
    "hidden_dropout": 0.1,
    "hidden_dropout_prob": 0.1,
    "hidden_size": 1024,
    "initializer_range": 0.02,
    "intermediate_size": 4096,
    "layer_norm_eps": 1e-05,
    "layerdrop": 0.1,
    "mask_feature_length": 10,
    "mask_feature_prob": 0.0,
    "mask_time_length": 10,
    "mask_time_prob": 0.05,
    "model_type": "wav2vec2",
    "num_attention_heads": 16,
    "num_conv_pos_embedding_groups": 16,
    "num_conv_pos_embeddings": 128,
    "num_feat_extract_layers": 7,
    "num_hidden_layers": 24,
    "pad_token_id": 0,
    "transformers_version": "4.4.0.dev0",
    "vocab_size": 49
}
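
For reference, this is the standard transformers configuration of the underlying wav2vec 2.0 encoder. An illustrative sketch of parsing it directly (SpeechBrain itself loads it through its HuggingFaceWav2Vec2 wrapper):

```python
# Illustrative only: parse the shipped config with transformers.
from transformers import Wav2Vec2Config

config = Wav2Vec2Config.from_json_file("config.json")
print(config.hidden_size, config.num_hidden_layers)  # 1024 24
```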
hyperparams.yaml
ADDED
@@ -0,0 +1,119 @@
# ################################
# Model: wav2vec2 + DNN + CTC/Attention
# Augmentation: SpecAugment
# Authors: Titouan Parcollet 2021
# ################################

sample_rate: 16000
wav2vec2_hub: facebook/wav2vec2-large-xlsr-53-french

# BPE parameters
token_type: unigram  # ["unigram", "bpe", "char"]
character_coverage: 1.0

# Model parameters
activation: !name:torch.nn.LeakyReLU
dnn_layers: 2
dnn_neurons: 1024
emb_size: 128
dec_neurons: 1024

# Outputs
output_neurons: 500  # BPE size, index(blank/eos/bos) = 0

# Decoding parameters
# Be sure that the bos and eos index match with the BPEs ones
blank_index: 0
bos_index: 1
eos_index: 2
min_decode_ratio: 0.0
max_decode_ratio: 1.0
beam_size: 80
eos_threshold: 1.5
using_max_attn_shift: True
max_attn_shift: 140
ctc_weight_decode: 0.0
temperature: 1.50

enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN
    input_shape: [null, null, 1024]
    activation: !ref <activation>
    dnn_blocks: !ref <dnn_layers>
    dnn_neurons: !ref <dnn_neurons>

wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2
    source: !ref <wav2vec2_hub>
    output_norm: True
    freeze: True
    pretrain: False
    save_path: model_checkpoints

emb: !new:speechbrain.nnet.embedding.Embedding
    num_embeddings: !ref <output_neurons>
    embedding_dim: !ref <emb_size>

dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder
    enc_dim: !ref <dnn_neurons>
    input_size: !ref <emb_size>
    rnn_type: gru
    attn_type: location
    hidden_size: 1024
    attn_dim: 1024
    num_layers: 1
    scaling: 1.0
    channels: 10
    kernel_size: 100
    re_init: True
    dropout: 0.15

ctc_lin: !new:speechbrain.nnet.linear.Linear
    input_size: !ref <dnn_neurons>
    n_neurons: !ref <output_neurons>

seq_lin: !new:speechbrain.nnet.linear.Linear
    input_size: !ref <dec_neurons>
    n_neurons: !ref <output_neurons>

log_softmax: !new:speechbrain.nnet.activations.Softmax
    apply_log: True

ctc_cost: !name:speechbrain.nnet.losses.ctc_loss
    blank_index: !ref <blank_index>

seq_cost: !name:speechbrain.nnet.losses.nll_loss
    label_smoothing: 0.1

asr_model: !new:torch.nn.ModuleList
    - [!ref <enc>, !ref <emb>, !ref <dec>, !ref <ctc_lin>, !ref <seq_lin>]

tokenizer: !new:sentencepiece.SentencePieceProcessor

encoder: !new:speechbrain.nnet.containers.LengthsCapableSequential
    wav2vec2: !ref <wav2vec2>
    enc: !ref <enc>

decoder: !new:speechbrain.decoders.S2SRNNBeamSearcher
    embedding: !ref <emb>
    decoder: !ref <dec>
    linear: !ref <seq_lin>
    ctc_linear: !ref <ctc_lin>
    bos_index: !ref <bos_index>
    eos_index: !ref <eos_index>
    blank_index: !ref <blank_index>
    min_decode_ratio: !ref <min_decode_ratio>
    max_decode_ratio: !ref <max_decode_ratio>
    beam_size: !ref <beam_size>
    eos_threshold: !ref <eos_threshold>
    using_max_attn_shift: !ref <using_max_attn_shift>
    max_attn_shift: !ref <max_attn_shift>
    temperature: !ref <temperature>

modules:
    encoder: !ref <encoder>
    decoder: !ref <decoder>

pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
    loadables:
        wav2vec2: !ref <wav2vec2>
        asr: !ref <asr_model>
        tokenizer: !ref <tokenizer>
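
Since this file is plain HyperPyYAML, the object graph above can also be instantiated outside of the pretrained interface. A minimal sketch, assuming the `hyperpyyaml` package (a SpeechBrain dependency); note that building `wav2vec2` downloads the base model from the HuggingFace hub:

```python
# Minimal sketch: instantiate the modules declared in hyperparams.yaml.
from hyperpyyaml import load_hyperpyyaml

with open("hyperparams.yaml") as f:
    hparams = load_hyperpyyaml(f)

# The YAML has already built the objects; e.g. the beam-search decoder:
decoder = hparams["decoder"]
print(type(decoder).__name__)  # S2SRNNBeamSearcher
```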
preprocessor_config.json
ADDED
@@ -0,0 +1,8 @@
{
    "do_normalize": true,
    "feature_size": 1,
    "padding_side": "right",
    "padding_value": 0.0,
    "return_attention_mask": true,
    "sampling_rate": 16000
}
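
This file maps onto transformers' `Wav2Vec2FeatureExtractor`, which normalizes and pads raw 16 kHz waveforms before they reach the wav2vec 2.0 encoder. An illustrative sketch, assuming the file sits in the current directory:

```python
from transformers import Wav2Vec2FeatureExtractor

# from_pretrained picks up preprocessor_config.json from the given directory
extractor = Wav2Vec2FeatureExtractor.from_pretrained(".")
print(extractor.sampling_rate)  # 16000
```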
tokenizer.ckpt
ADDED
Binary file (244 kB)
wav2vec2.ckpt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5675c122faaa76ed0e81e658a98a7bd6e498cd79f2f171b158a6dae10985c49c
size 1261930757