Usin2705 committed on
Commit
add882b
·
1 Parent(s): db00521

Upload 9 files

Browse files
README.md CHANGED
@@ -1,3 +1,82 @@
1
  ---
2
- license: cc-by-nc-sa-4.0
 
 
 
 
 
 
 
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ license: apache-2.0
3
+ tags:
4
+ - automatic-speech-recognition
5
+ - Finnish parliament data slow samples 300h
6
+ - generated_from_trainer
7
+ model-index:
8
+ - name: nhan_wav2vec2-xls-r-300m-finnish-ent-10
9
+ results: []
10
  ---
11
+
12
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
13
+ should probably proofread and complete it, then remove this comment. -->
14
+
15
+ # nhan_wav2vec2-xls-r-300m-finnish-ent-10
16
+
17
+ This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the FINNISH PARLIAMENT DATA SLOW SAMPLES 300H - FI-FI dataset.
18
+ It achieves the following results on the evaluation set:
19
+ - Loss: 196.9006
20
+ - Cer: 0.0178
21
+ - Wer: 0.0592
22
+
23
+ ## Model description
24
+
25
+ More information needed
26
+
27
+ ## Intended uses & limitations
28
+
29
+ More information needed
30
+
31
+ ## Training and evaluation data
32
+
33
+ More information needed
34
+
35
+ ## Training procedure
36
+
37
+ ### Training hyperparameters
38
+
39
+ The following hyperparameters were used during training:
40
+ - learning_rate: 5e-05
41
+ - train_batch_size: 128
42
+ - eval_batch_size: 128
43
+ - seed: 1011
44
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
45
+ - lr_scheduler_type: linear
46
+ - lr_scheduler_warmup_steps: 500
47
+ - num_epochs: 10.0
48
+ - mixed_precision_training: Native AMP
49
+
50
+ ### Training results
51
+
52
+ | Training Loss | Epoch | Step | Cer | Validation Loss | Wer |
53
+ |:-------------:|:-----:|:-----:|:------:|:---------------:|:------:|
54
+ | 409.2657 | 0.47 | 500 | 1.0 | 409.9399 | 1.0 |
55
+ | 242.0796 | 0.94 | 1000 | 0.0762 | 222.4043 | 0.4204 |
56
+ | 214.4323 | 1.4 | 1500 | 0.0342 | 205.3006 | 0.1620 |
57
+ | 208.1767 | 1.87 | 2000 | 0.0261 | 201.7304 | 0.1095 |
58
+ | 205.3693 | 2.34 | 2500 | 0.0248 | 200.3012 | 0.1037 |
59
+ | 204.3477 | 2.81 | 3000 | 0.0219 | 199.3383 | 0.0830 |
60
+ | 202.9748 | 3.28 | 3500 | 0.0207 | 199.1589 | 0.0782 |
61
+ | 201.9818 | 3.75 | 4000 | 0.0207 | 198.5560 | 0.0769 |
62
+ | 201.8992 | 4.21 | 4500 | 0.0201 | 198.0990 | 0.0724 |
63
+ | 201.6079 | 4.68 | 5000 | 0.0209 | 197.8516 | 0.0712 |
64
+ | 200.6187 | 5.15 | 5500 | 0.0191 | 197.6185 | 0.0667 |
65
+ | 200.5608 | 5.62 | 6000 | 0.0189 | 197.5194 | 0.0658 |
66
+ | 200.1649 | 6.09 | 6500 | 0.0191 | 197.3655 | 0.0641 |
67
+ | 200.1713 | 6.55 | 7000 | 0.0186 | 197.2977 | 0.0629 |
68
+ | 200.1245 | 7.02 | 7500 | 0.0193 | 197.0914 | 0.0638 |
69
+ | 199.5289 | 7.49 | 8000 | 0.0181 | 197.0704 | 0.0608 |
70
+ | 199.4458 | 7.96 | 8500 | 0.0183 | 196.9986 | 0.0606 |
71
+ | 199.1502 | 8.43 | 9000 | 0.0178 | 197.0260 | 0.0590 |
72
+ | 199.4437 | 8.9 | 9500 | 0.0180 | 196.9412 | 0.0595 |
73
+ | 198.8669      | 9.36  | 10000 | 0.0180 | 196.8834        | 0.0600 |
74
+ | 199.1329      | 9.83  | 10500 | 0.0178 | 196.9176        | 0.0591 |
75
+
76
+
77
+ ### Framework versions
78
+
79
+ - Transformers 4.18.0
80
+ - Pytorch 1.12.0.dev20220305
81
+ - Datasets 1.18.4.dev0
82
+ - Tokenizers 0.11.6
all_results.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 10.0,
3
+ "eval_cer": 0.017841032626644174,
4
+ "eval_loss": 196.900634765625,
5
+ "eval_runtime": 2576.7481,
6
+ "eval_samples": 34170,
7
+ "eval_samples_per_second": 13.261,
8
+ "eval_steps_per_second": 0.104,
9
+ "eval_wer": 0.05917507718071473,
10
+ "train_loss": 21.992585440074908,
11
+ "train_runtime": 18762.4754,
12
+ "train_samples": 136678,
13
+ "train_samples_per_second": 72.846,
14
+ "train_steps_per_second": 0.569
15
+ }
config.json ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "facebook/wav2vec2-xls-r-300m",
3
+ "activation_dropout": 0.055,
4
+ "adapter_kernel_size": 3,
5
+ "adapter_stride": 2,
6
+ "add_adapter": false,
7
+ "apply_spec_augment": true,
8
+ "architectures": [
9
+ "Wav2Vec2ForECTC"
10
+ ],
11
+ "attention_dropout": 0.094,
12
+ "bos_token_id": 1,
13
+ "classifier_proj_size": 256,
14
+ "codevector_dim": 768,
15
+ "contrastive_logits_temperature": 0.1,
16
+ "conv_bias": true,
17
+ "conv_dim": [
18
+ 512,
19
+ 512,
20
+ 512,
21
+ 512,
22
+ 512,
23
+ 512,
24
+ 512
25
+ ],
26
+ "conv_kernel": [
27
+ 10,
28
+ 3,
29
+ 3,
30
+ 3,
31
+ 3,
32
+ 2,
33
+ 2
34
+ ],
35
+ "conv_stride": [
36
+ 5,
37
+ 2,
38
+ 2,
39
+ 2,
40
+ 2,
41
+ 2,
42
+ 2
43
+ ],
44
+ "ctc_loss_reduction": "mean",
45
+ "ctc_zero_infinity": false,
46
+ "diversity_loss_weight": 0.1,
47
+ "do_stable_layer_norm": true,
48
+ "eos_token_id": 2,
49
+ "feat_extract_activation": "gelu",
50
+ "feat_extract_dropout": 0.0,
51
+ "feat_extract_norm": "layer",
52
+ "feat_proj_dropout": 0.04,
53
+ "feat_quantizer_dropout": 0.0,
54
+ "final_dropout": 0.0,
55
+ "hidden_act": "gelu",
56
+ "hidden_dropout": 0.047,
57
+ "hidden_size": 1024,
58
+ "initializer_range": 0.02,
59
+ "intermediate_size": 4096,
60
+ "layer_norm_eps": 1e-05,
61
+ "layerdrop": 0.041,
62
+ "mask_feature_length": 10,
63
+ "mask_feature_min_masks": 0,
64
+ "mask_feature_prob": 0.0,
65
+ "mask_time_length": 10,
66
+ "mask_time_min_masks": 2,
67
+ "mask_time_prob": 0.082,
68
+ "model_type": "wav2vec2",
69
+ "num_adapter_layers": 3,
70
+ "num_attention_heads": 16,
71
+ "num_codevector_groups": 2,
72
+ "num_codevectors_per_group": 320,
73
+ "num_conv_pos_embedding_groups": 16,
74
+ "num_conv_pos_embeddings": 128,
75
+ "num_feat_extract_layers": 7,
76
+ "num_hidden_layers": 24,
77
+ "num_negatives": 100,
78
+ "output_hidden_size": 1024,
79
+ "pad_token_id": 0,
80
+ "proj_codevector_dim": 768,
81
+ "tdnn_dilation": [
82
+ 1,
83
+ 2,
84
+ 3,
85
+ 1,
86
+ 1
87
+ ],
88
+ "tdnn_dim": [
89
+ 512,
90
+ 512,
91
+ 512,
92
+ 512,
93
+ 1500
94
+ ],
95
+ "tdnn_kernel": [
96
+ 5,
97
+ 3,
98
+ 3,
99
+ 1,
100
+ 1
101
+ ],
102
+ "torch_dtype": "float32",
103
+ "transformers_version": "4.18.0",
104
+ "use_weighted_layer_sum": false,
105
+ "vocab_size": 33,
106
+ "xvector_output_dim": 512
107
+ }
preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "do_normalize": true,
3
+ "feature_extractor_type": "Wav2Vec2FeatureExtractor",
4
+ "feature_size": 1,
5
+ "padding_side": "right",
6
+ "padding_value": 0,
7
+ "return_attention_mask": true,
8
+ "sampling_rate": 16000
9
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7c57bee0b987e855a5851dafa839034be0b40f9838aac082f2d1c202f308e96
3
+ size 1262034033
special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "replace_word_delimiter_char": " ", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf740d4d2e16ae0e135d3e57927c6dbfdcaba5e79039717a59b7555f22c3afa5
3
+ size 3183
vocab.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"'": 1, "a": 2, "b": 3, "c": 4, "d": 5, "e": 6, "f": 7, "g": 8, "h": 9, "i": 10, "j": 11, "k": 12, "l": 13, "m": 14, "n": 15, "o": 16, "p": 17, "q": 18, "r": 19, "s": 20, "t": 21, "u": 22, "v": 23, "w": 24, "x": 25, "y": 26, "z": 27, "ä": 28, "å": 29, "ö": 30, "|": 32, "[UNK]": 31, "[PAD]": 0}