csikasote committed
Commit 9cba4ef · verified · 1 Parent(s): 7a2b404

End of training

README.md CHANGED
@@ -4,11 +4,23 @@ license: apache-2.0
 base_model: openai/whisper-medium
 tags:
 - generated_from_trainer
+datasets:
+- toigen
 metrics:
 - wer
 model-index:
 - name: whisper-medium-toigen-combined-model
-  results: []
+  results:
+  - task:
+      name: Automatic Speech Recognition
+      type: automatic-speech-recognition
+    dataset:
+      name: toigen
+      type: toigen
+    metrics:
+    - name: Wer
+      type: wer
+      value: 0.4497528830313015
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -16,10 +28,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # whisper-medium-toigen-combined-model
 
-This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on an unknown dataset.
+This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the toigen dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.7208
-- Wer: 0.3864
+- Loss: 0.6425
+- Wer: 0.4498
 
 ## Model description
 
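Editor's note: as a convenience for readers of the updated card, here is a minimal inference sketch. It assumes the checkpoint is published under a repo id such as `csikasote/whisper-medium-toigen-combined-model` (hypothetical; substitute the actual repo path or a local checkpoint directory).

```python
# Minimal usage sketch with the transformers ASR pipeline.
# The repo id below is an assumption; point it at the real checkpoint.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="csikasote/whisper-medium-toigen-combined-model",  # hypothetical repo id
)

# "sample.wav" is a placeholder path to a mono 16 kHz audio file.
print(asr("sample.wav")["text"])
```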
all_results.json ADDED
@@ -0,0 +1,15 @@
+{
+    "epoch": 6.6059171597633135,
+    "eval_loss": 0.6425132155418396,
+    "eval_runtime": 253.9056,
+    "eval_samples": 392,
+    "eval_samples_per_second": 1.544,
+    "eval_steps_per_second": 0.772,
+    "eval_wer": 0.4497528830313015,
+    "total_flos": 1.138790955810816e+19,
+    "train_loss": 1.9424426797458103,
+    "train_runtime": 4520.3552,
+    "train_samples": 1689,
+    "train_samples_per_second": 11.209,
+    "train_steps_per_second": 1.4
+}
eval_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 6.6059171597633135,
+    "eval_loss": 0.6425132155418396,
+    "eval_runtime": 253.9056,
+    "eval_samples": 392,
+    "eval_samples_per_second": 1.544,
+    "eval_steps_per_second": 0.772,
+    "eval_wer": 0.4497528830313015
+}
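For context on `eval_wer` above: the value is a fraction, so 0.4498 corresponds to roughly a 45% word error rate. A hedged sketch of one common way to compute it in the Hugging Face ecosystem, using the `evaluate` library (the transcripts are placeholders, not data from this run):

```python
# Sketch: computing word error rate with the `evaluate` library.
# The strings below are placeholders, not outputs of this model.
import evaluate

wer_metric = evaluate.load("wer")
predictions = ["the quick brown fox jumps"]
references = ["the quick brown fox jumps over the lazy dog"]

# Returns a fraction; 0.4497... in eval_results.json ~= 45% WER.
print(wer_metric.compute(predictions=predictions, references=references))
```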
runs/Jan05_20-15-26_srvrocgpu011.uct.ac.za/events.out.tfevents.1736105977.srvrocgpu011.uct.ac.za ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a6686ce5c20a83557d8b7728c09481c0db8abd443b9200214745f048cae3471
+size 40
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 6.6059171597633135,
+    "total_flos": 1.138790955810816e+19,
+    "train_loss": 1.9424426797458103,
+    "train_runtime": 4520.3552,
+    "train_samples": 1689,
+    "train_samples_per_second": 11.209,
+    "train_steps_per_second": 1.4
+}
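A note on reading these throughput numbers: `train_samples_per_second` (11.209) and `train_steps_per_second` (1.4) appear to be normalized by the scheduled run length recorded in trainer_state.json below (30 epochs / 6330 steps), not by the 1400 steps actually completed before early stopping. This is an inference from the arithmetic, sketched here:

```python
# Back-of-the-envelope check (assumption: the speed metrics use the scheduled
# run length rather than the early-stopped one).
train_samples = 1689        # from train_results.json
num_train_epochs = 30       # scheduled epochs, from trainer_state.json
max_steps = 6330            # scheduled steps, from trainer_state.json
train_runtime = 4520.3552   # seconds

print(train_samples * num_train_epochs / train_runtime)  # ~11.209 samples/s
print(max_steps / train_runtime)                          # ~1.40 steps/s
```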
trainer_state.json ADDED
@@ -0,0 +1,506 @@
+{
+  "best_metric": 0.6425132155418396,
+  "best_model_checkpoint": "/scratch/skscla001/speech/results/whisper-medium-toigen-combined-model/checkpoint-800",
+  "epoch": 6.6059171597633135,
+  "eval_steps": 200,
+  "global_step": 1400,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.11834319526627218,
+      "grad_norm": 120.00074768066406,
+      "learning_rate": 4.0000000000000003e-07,
+      "loss": 13.7413,
+      "step": 25
+    },
+    {
+      "epoch": 0.23668639053254437,
+      "grad_norm": 93.74044799804688,
+      "learning_rate": 9.000000000000001e-07,
+      "loss": 10.9596,
+      "step": 50
+    },
+    {
+      "epoch": 0.35502958579881655,
+      "grad_norm": 83.35017395019531,
+      "learning_rate": 1.4000000000000001e-06,
+      "loss": 8.1662,
+      "step": 75
+    },
+    {
+      "epoch": 0.47337278106508873,
+      "grad_norm": 69.67986297607422,
+      "learning_rate": 1.9000000000000002e-06,
+      "loss": 6.1015,
+      "step": 100
+    },
+    {
+      "epoch": 0.591715976331361,
+      "grad_norm": 65.54053497314453,
+      "learning_rate": 2.4000000000000003e-06,
+      "loss": 4.9343,
+      "step": 125
+    },
+    {
+      "epoch": 0.7100591715976331,
+      "grad_norm": 51.996883392333984,
+      "learning_rate": 2.9e-06,
+      "loss": 4.21,
+      "step": 150
+    },
+    {
+      "epoch": 0.8284023668639053,
+      "grad_norm": 52.11565017700195,
+      "learning_rate": 3.4000000000000005e-06,
+      "loss": 4.4511,
+      "step": 175
+    },
+    {
+      "epoch": 0.9467455621301775,
+      "grad_norm": 70.60668182373047,
+      "learning_rate": 3.900000000000001e-06,
+      "loss": 3.9586,
+      "step": 200
+    },
+    {
+      "epoch": 0.9467455621301775,
+      "eval_loss": 0.8733280897140503,
+      "eval_runtime": 258.2817,
+      "eval_samples_per_second": 1.518,
+      "eval_steps_per_second": 0.759,
+      "eval_wer": 0.5994351612144034,
+      "step": 200
+    },
+    {
+      "epoch": 1.0615384615384615,
+      "grad_norm": 54.279884338378906,
+      "learning_rate": 4.4e-06,
+      "loss": 3.2145,
+      "step": 225
+    },
+    {
+      "epoch": 1.1798816568047337,
+      "grad_norm": 50.64502716064453,
+      "learning_rate": 4.9000000000000005e-06,
+      "loss": 3.1628,
+      "step": 250
+    },
+    {
+      "epoch": 1.298224852071006,
+      "grad_norm": 62.229068756103516,
+      "learning_rate": 5.400000000000001e-06,
+      "loss": 2.9053,
+      "step": 275
+    },
+    {
+      "epoch": 1.4165680473372781,
+      "grad_norm": 48.52084732055664,
+      "learning_rate": 5.9e-06,
+      "loss": 2.8052,
+      "step": 300
+    },
+    {
+      "epoch": 1.5349112426035503,
+      "grad_norm": 45.87370300292969,
+      "learning_rate": 6.4000000000000006e-06,
+      "loss": 2.7131,
+      "step": 325
+    },
+    {
+      "epoch": 1.6532544378698226,
+      "grad_norm": 60.52415084838867,
+      "learning_rate": 6.9e-06,
+      "loss": 2.7482,
+      "step": 350
+    },
+    {
+      "epoch": 1.7715976331360945,
+      "grad_norm": 51.26884078979492,
+      "learning_rate": 7.4e-06,
+      "loss": 2.8582,
+      "step": 375
+    },
+    {
+      "epoch": 1.8899408284023669,
+      "grad_norm": 51.80022430419922,
+      "learning_rate": 7.9e-06,
+      "loss": 2.4999,
+      "step": 400
+    },
+    {
+      "epoch": 1.8899408284023669,
+      "eval_loss": 0.6725602149963379,
+      "eval_runtime": 255.9417,
+      "eval_samples_per_second": 1.532,
+      "eval_steps_per_second": 0.766,
+      "eval_wer": 0.46481525064721113,
+      "step": 400
+    },
+    {
+      "epoch": 2.0047337278106507,
+      "grad_norm": 35.06719207763672,
+      "learning_rate": 8.400000000000001e-06,
+      "loss": 2.1474,
+      "step": 425
+    },
+    {
+      "epoch": 2.123076923076923,
+      "grad_norm": 40.39971160888672,
+      "learning_rate": 8.900000000000001e-06,
+      "loss": 1.4558,
+      "step": 450
+    },
+    {
+      "epoch": 2.2414201183431954,
+      "grad_norm": 37.043128967285156,
+      "learning_rate": 9.4e-06,
+      "loss": 1.6915,
+      "step": 475
+    },
+    {
+      "epoch": 2.3597633136094673,
+      "grad_norm": 38.598167419433594,
+      "learning_rate": 9.9e-06,
+      "loss": 1.7167,
+      "step": 500
+    },
+    {
+      "epoch": 2.4781065088757397,
+      "grad_norm": 46.40306091308594,
+      "learning_rate": 9.965694682675816e-06,
+      "loss": 1.6127,
+      "step": 525
+    },
+    {
+      "epoch": 2.596449704142012,
+      "grad_norm": 32.87551498413086,
+      "learning_rate": 9.922813036020584e-06,
+      "loss": 1.6845,
+      "step": 550
+    },
+    {
+      "epoch": 2.714792899408284,
+      "grad_norm": 19.153915405273438,
+      "learning_rate": 9.879931389365352e-06,
+      "loss": 1.4961,
+      "step": 575
+    },
+    {
+      "epoch": 2.8331360946745563,
+      "grad_norm": 28.79606819152832,
+      "learning_rate": 9.837049742710121e-06,
+      "loss": 1.7047,
+      "step": 600
+    },
+    {
+      "epoch": 2.8331360946745563,
+      "eval_loss": 0.6522520184516907,
+      "eval_runtime": 254.5133,
+      "eval_samples_per_second": 1.54,
+      "eval_steps_per_second": 0.77,
+      "eval_wer": 0.45846081430924923,
+      "step": 600
+    },
+    {
+      "epoch": 2.9514792899408286,
+      "grad_norm": 39.831016540527344,
+      "learning_rate": 9.794168096054889e-06,
+      "loss": 1.8359,
+      "step": 625
+    },
+    {
+      "epoch": 3.0662721893491125,
+      "grad_norm": 32.2681999206543,
+      "learning_rate": 9.751286449399657e-06,
+      "loss": 1.1873,
+      "step": 650
+    },
+    {
+      "epoch": 3.184615384615385,
+      "grad_norm": 23.113107681274414,
+      "learning_rate": 9.708404802744426e-06,
+      "loss": 0.8456,
+      "step": 675
+    },
+    {
+      "epoch": 3.3029585798816568,
+      "grad_norm": 24.971742630004883,
+      "learning_rate": 9.665523156089196e-06,
+      "loss": 0.7569,
+      "step": 700
+    },
+    {
+      "epoch": 3.421301775147929,
+      "grad_norm": 18.853492736816406,
+      "learning_rate": 9.622641509433963e-06,
+      "loss": 0.9036,
+      "step": 725
+    },
+    {
+      "epoch": 3.5396449704142015,
+      "grad_norm": 23.712282180786133,
+      "learning_rate": 9.579759862778731e-06,
+      "loss": 0.7134,
+      "step": 750
+    },
+    {
+      "epoch": 3.6579881656804734,
+      "grad_norm": 38.5533332824707,
+      "learning_rate": 9.536878216123499e-06,
+      "loss": 0.9859,
+      "step": 775
+    },
+    {
+      "epoch": 3.7763313609467457,
+      "grad_norm": 38.32489776611328,
+      "learning_rate": 9.493996569468268e-06,
+      "loss": 0.9573,
+      "step": 800
+    },
+    {
+      "epoch": 3.7763313609467457,
+      "eval_loss": 0.6425132155418396,
+      "eval_runtime": 261.773,
+      "eval_samples_per_second": 1.497,
+      "eval_steps_per_second": 0.749,
+      "eval_wer": 0.4497528830313015,
+      "step": 800
+    },
+    {
+      "epoch": 3.8946745562130176,
+      "grad_norm": 22.206430435180664,
+      "learning_rate": 9.451114922813038e-06,
+      "loss": 0.876,
+      "step": 825
+    },
+    {
+      "epoch": 4.0094674556213015,
+      "grad_norm": 17.389280319213867,
+      "learning_rate": 9.408233276157806e-06,
+      "loss": 0.8495,
+      "step": 850
+    },
+    {
+      "epoch": 4.127810650887574,
+      "grad_norm": 15.826939582824707,
+      "learning_rate": 9.365351629502573e-06,
+      "loss": 0.3702,
+      "step": 875
+    },
+    {
+      "epoch": 4.246153846153846,
+      "grad_norm": 19.050931930541992,
+      "learning_rate": 9.322469982847341e-06,
+      "loss": 0.4092,
+      "step": 900
+    },
+    {
+      "epoch": 4.364497041420118,
+      "grad_norm": 18.2451114654541,
+      "learning_rate": 9.27958833619211e-06,
+      "loss": 0.3743,
+      "step": 925
+    },
+    {
+      "epoch": 4.482840236686391,
+      "grad_norm": 12.22165298461914,
+      "learning_rate": 9.236706689536878e-06,
+      "loss": 0.4114,
+      "step": 950
+    },
+    {
+      "epoch": 4.601183431952663,
+      "grad_norm": 14.346109390258789,
+      "learning_rate": 9.193825042881648e-06,
+      "loss": 0.3816,
+      "step": 975
+    },
+    {
+      "epoch": 4.719526627218935,
+      "grad_norm": 56.37358474731445,
+      "learning_rate": 9.150943396226416e-06,
+      "loss": 0.4029,
+      "step": 1000
+    },
+    {
+      "epoch": 4.719526627218935,
+      "eval_loss": 0.6656792163848877,
+      "eval_runtime": 253.5608,
+      "eval_samples_per_second": 1.546,
+      "eval_steps_per_second": 0.773,
+      "eval_wer": 0.404330430689574,
+      "step": 1000
+    },
+    {
+      "epoch": 4.8378698224852075,
+      "grad_norm": 23.6176700592041,
+      "learning_rate": 9.108061749571185e-06,
+      "loss": 0.4506,
+      "step": 1025
+    },
+    {
+      "epoch": 4.956213017751479,
+      "grad_norm": 23.03661346435547,
+      "learning_rate": 9.065180102915953e-06,
+      "loss": 0.4928,
+      "step": 1050
+    },
+    {
+      "epoch": 5.071005917159764,
+      "grad_norm": 25.316198348999023,
+      "learning_rate": 9.02229845626072e-06,
+      "loss": 0.2739,
+      "step": 1075
+    },
+    {
+      "epoch": 5.189349112426036,
+      "grad_norm": 6.165097713470459,
+      "learning_rate": 8.97941680960549e-06,
+      "loss": 0.1914,
+      "step": 1100
+    },
+    {
+      "epoch": 5.3076923076923075,
+      "grad_norm": 20.19268226623535,
+      "learning_rate": 8.936535162950258e-06,
+      "loss": 0.221,
+      "step": 1125
+    },
+    {
+      "epoch": 5.42603550295858,
+      "grad_norm": 12.469457626342773,
+      "learning_rate": 8.893653516295027e-06,
+      "loss": 0.199,
+      "step": 1150
+    },
+    {
+      "epoch": 5.544378698224852,
+      "grad_norm": 14.28165054321289,
+      "learning_rate": 8.850771869639795e-06,
+      "loss": 0.2235,
+      "step": 1175
+    },
+    {
+      "epoch": 5.662721893491124,
+      "grad_norm": 15.683744430541992,
+      "learning_rate": 8.807890222984563e-06,
+      "loss": 0.2311,
+      "step": 1200
+    },
+    {
+      "epoch": 5.662721893491124,
+      "eval_loss": 0.6909866333007812,
+      "eval_runtime": 253.3291,
+      "eval_samples_per_second": 1.547,
+      "eval_steps_per_second": 0.774,
+      "eval_wer": 0.41868674982348786,
+      "step": 1200
+    },
+    {
+      "epoch": 5.781065088757396,
+      "grad_norm": 11.54914665222168,
+      "learning_rate": 8.765008576329332e-06,
+      "loss": 0.2098,
+      "step": 1225
+    },
+    {
+      "epoch": 5.899408284023669,
+      "grad_norm": 18.604154586791992,
+      "learning_rate": 8.722126929674101e-06,
+      "loss": 0.1964,
+      "step": 1250
+    },
+    {
+      "epoch": 6.014201183431953,
+      "grad_norm": 13.384803771972656,
+      "learning_rate": 8.67924528301887e-06,
+      "loss": 0.2031,
+      "step": 1275
+    },
+    {
+      "epoch": 6.132544378698225,
+      "grad_norm": 9.78126049041748,
+      "learning_rate": 8.636363636363637e-06,
+      "loss": 0.1747,
+      "step": 1300
+    },
+    {
+      "epoch": 6.250887573964497,
+      "grad_norm": 8.569636344909668,
+      "learning_rate": 8.593481989708405e-06,
+      "loss": 0.1078,
+      "step": 1325
+    },
+    {
+      "epoch": 6.36923076923077,
+      "grad_norm": 16.902490615844727,
+      "learning_rate": 8.550600343053174e-06,
+      "loss": 0.1253,
+      "step": 1350
+    },
+    {
+      "epoch": 6.487573964497042,
+      "grad_norm": 15.703929901123047,
+      "learning_rate": 8.507718696397942e-06,
+      "loss": 0.1213,
+      "step": 1375
+    },
+    {
+      "epoch": 6.6059171597633135,
+      "grad_norm": 10.21462631225586,
+      "learning_rate": 8.464837049742711e-06,
+      "loss": 0.1545,
+      "step": 1400
+    },
+    {
+      "epoch": 6.6059171597633135,
+      "eval_loss": 0.720788836479187,
+      "eval_runtime": 248.7794,
+      "eval_samples_per_second": 1.576,
+      "eval_steps_per_second": 0.788,
+      "eval_wer": 0.38644386914568135,
+      "step": 1400
+    },
+    {
+      "epoch": 6.6059171597633135,
+      "step": 1400,
+      "total_flos": 1.138790955810816e+19,
+      "train_loss": 1.9424426797458103,
+      "train_runtime": 4520.3552,
+      "train_samples_per_second": 11.209,
+      "train_steps_per_second": 1.4
+    }
+  ],
+  "logging_steps": 25,
+  "max_steps": 6330,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 30,
+  "save_steps": 200,
+  "stateful_callbacks": {
+    "EarlyStoppingCallback": {
+      "args": {
+        "early_stopping_patience": 3,
+        "early_stopping_threshold": 0.0
+      },
+      "attributes": {
+        "early_stopping_patience_counter": 3
+      }
+    },
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 1.138790955810816e+19,
+  "train_batch_size": 2,
+  "trial_name": null,
+  "trial_params": null
+}
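To make the long log above easier to scan, here is a small sketch that loads trainer_state.json and lists the evaluation points; the field names match the entries shown above.

```python
# Summarize the evaluation history recorded in trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_wer" in entry:
        print(f"step {entry['step']:>4}: "
              f"eval_loss={entry['eval_loss']:.4f}, eval_wer={entry['eval_wer']:.4f}")

print("best checkpoint:", state["best_model_checkpoint"])
```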