Commit 9b7997b (verified) by xezpeleta · 1 parent: 1aafed0

End of training

README.md CHANGED
@@ -3,20 +3,33 @@ library_name: transformers
 license: apache-2.0
 base_model: openai/whisper-large-v3
 tags:
+- whisper-event
 - generated_from_trainer
+datasets:
+- asierhv/composite_corpus_eu_v2.1
 metrics:
 - wer
 model-index:
-- name: openai/whisper-large-v3
-  results: []
+- name: Whisper Large Basque
+  results:
+  - task:
+      name: Automatic Speech Recognition
+      type: automatic-speech-recognition
+    dataset:
+      name: asierhv/composite_corpus_eu_v2.1
+      type: asierhv/composite_corpus_eu_v2.1
+    metrics:
+    - name: Wer
+      type: wer
+      value: 6.544273760459599
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->
 
-# openai/whisper-large-v3
+# Whisper Large Basque
 
-This model is a fine-tuned version of [openai/whisper-large-v3](https://huggingface.co/openai/whisper-large-v3) on an unknown dataset.
+This model is a fine-tuned version of [openai/whisper-large-v3](https://huggingface.co/openai/whisper-large-v3) on the asierhv/composite_corpus_eu_v2.1 dataset.
 It achieves the following results on the evaluation set:
 - Loss: 0.1549
 - Wer: 6.5443
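For context on the card above: it documents a Basque ASR checkpoint fine-tuned from Whisper large-v3. A minimal usage sketch with the 🤗 Transformers pipeline follows; the final repo ID is not stated in this diff, so the model name below is a hypothetical placeholder.

```python
# Minimal sketch: transcribing Basque audio with the fine-tuned checkpoint.
# NOTE: "xezpeleta/whisper-large-eu" is a hypothetical repo ID (assumption);
# replace it with the actual model ID where the checkpoint is published.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="xezpeleta/whisper-large-eu",  # hypothetical ID (assumption)
    chunk_length_s=30,                   # Whisper operates on 30 s windows
)

# Pin the language and task so Whisper does not auto-detect them.
result = asr(
    "sample.wav",
    generate_kwargs={"language": "basque", "task": "transcribe"},
)
print(result["text"])
```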
all_results.json CHANGED
@@ -1,14 +1,14 @@
 {
     "epoch": 1.0,
-    "eval_loss": 0.140655517578125,
+    "eval_loss": 0.15491345524787903,
     "eval_model_preparation_time": 0.0102,
-    "eval_runtime": 698.8029,
-    "eval_samples_per_second": 3.011,
-    "eval_steps_per_second": 0.376,
-    "eval_wer": 7.100121529400767,
+    "eval_runtime": 532.0406,
+    "eval_samples_per_second": 3.178,
+    "eval_steps_per_second": 0.398,
+    "eval_wer": 6.544273760459599,
     "total_flos": 1.0871994580992e+21,
-    "train_loss": 0.036449428302049634,
-    "train_runtime": 47722.2934,
-    "train_samples_per_second": 6.705,
-    "train_steps_per_second": 0.419
+    "train_loss": 0.0028733723163604737,
+    "train_runtime": 7510.0544,
+    "train_samples_per_second": 42.61,
+    "train_steps_per_second": 2.663
 }
eval_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 1.0,
-    "eval_loss": 0.140655517578125,
-    "eval_runtime": 698.8029,
-    "eval_samples_per_second": 3.011,
-    "eval_steps_per_second": 0.376,
-    "eval_wer": 7.100121529400767
+    "eval_loss": 0.15491345524787903,
+    "eval_runtime": 532.0406,
+    "eval_samples_per_second": 3.178,
+    "eval_steps_per_second": 0.398,
+    "eval_wer": 6.544273760459599
 }
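The eval_wer values in these files are percentages (6.5443 ≈ 6.54%). As a hedged sketch of how such a score is typically computed, here is the 🤗 `evaluate` library's WER metric; this diff does not show the repo's actual evaluation code, so this mirrors rather than reproduces it. The Basque strings are toy examples.

```python
# Minimal sketch of word error rate (WER) computation with `evaluate`
# (assumption: the repo computes WER equivalently, but that code is not shown).
import evaluate

wer_metric = evaluate.load("wer")

predictions = ["kaixo mundua"]  # model transcriptions (toy example)
references = ["kaixo mundu"]    # ground-truth transcripts (toy example)

# `compute` returns a fraction; the JSON above stores it scaled to a percentage.
wer = 100 * wer_metric.compute(predictions=predictions, references=references)
print(f"WER: {wer:.2f}%")  # 1 substitution over 2 words -> 50.00%
```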
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 1.0,
     "total_flos": 1.0871994580992e+21,
-    "train_loss": 0.036449428302049634,
-    "train_runtime": 47722.2934,
-    "train_samples_per_second": 6.705,
-    "train_steps_per_second": 0.419
+    "train_loss": 0.0028733723163604737,
+    "train_runtime": 7510.0544,
+    "train_samples_per_second": 42.61,
+    "train_steps_per_second": 2.663
 }
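A quick consistency check on the new throughput numbers: samples per second divided by steps per second recovers the effective samples consumed per optimizer step (assuming no additional gradient-accumulation bookkeeping, which this diff does not show).

```python
# Sanity check on the updated train_results.json throughput figures.
train_samples_per_second = 42.61
train_steps_per_second = 2.663
print(train_samples_per_second / train_steps_per_second)  # ~16.0 samples/step
```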
trainer_state.json CHANGED
The diff for this file is too large to render. See raw diff