csikasote committed
Commit 442c0ff · verified · 1 Parent(s): 67f5f62

End of training

README.md CHANGED
@@ -4,11 +4,23 @@ license: apache-2.0
  base_model: openai/whisper-medium
  tags:
  - generated_from_trainer
+ datasets:
+ - bemgen
  metrics:
  - wer
  model-index:
  - name: whisper-medium-bemgen-balanced-model
-   results: []
+   results:
+   - task:
+       name: Automatic Speech Recognition
+       type: automatic-speech-recognition
+     dataset:
+       name: bemgen
+       type: bemgen
+     metrics:
+     - name: Wer
+       type: wer
+       value: 0.4413347685683531
  ---
 
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -16,10 +28,10 @@ should probably proofread and complete it, then remove this comment. -->
 
  # whisper-medium-bemgen-balanced-model
 
- This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on an unknown dataset.
+ This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the bemgen dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.5771
- - Wer: 0.3991
+ - Loss: 0.5412
+ - Wer: 0.4413
 
  ## Model description
 
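The updated card stops at the metrics, so for reference here is a minimal inference sketch. It is not part of this commit: the repo id is an assumption pieced together from the committer name and the model-index name, and the audio path is a placeholder.

```python
# Minimal inference sketch (not part of this commit).
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="csikasote/whisper-medium-bemgen-balanced-model",  # assumed repo id
    chunk_length_s=30,  # chunk long recordings for Whisper's 30 s window
)

# "sample.wav" is a placeholder path to a 16 kHz Bemba speech recording.
print(asr("sample.wav")["text"])
```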
all_results.json ADDED
@@ -0,0 +1,15 @@
+ {
+     "epoch": 3.1683168316831685,
+     "eval_loss": 0.5411643981933594,
+     "eval_runtime": 276.8379,
+     "eval_samples": 504,
+     "eval_samples_per_second": 1.821,
+     "eval_steps_per_second": 0.91,
+     "eval_wer": 0.4413347685683531,
+     "total_flos": 1.306068100153344e+19,
+     "train_loss": 2.189091945886612,
+     "train_runtime": 5496.1435,
+     "train_samples": 4039,
+     "train_samples_per_second": 7.278,
+     "train_steps_per_second": 0.91
+ }
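The throughput entries are derived from the counts and runtimes in the same file; a quick sanity check in plain Python (values copied from the JSON above):

```python
# Values copied from all_results.json above.
eval_samples = 504
eval_runtime = 276.8379           # seconds
eval_samples_per_second = 1.821
eval_steps_per_second = 0.91

# samples / runtime reproduces the reported samples-per-second figure,
assert round(eval_samples / eval_runtime, 3) == eval_samples_per_second

# and the ratio of the two rates points to an evaluation batch size of 2
# (an inference from these numbers, not something stated in the commit).
print(round(eval_samples_per_second / eval_steps_per_second))  # -> 2
```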
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 3.1683168316831685,
+     "eval_loss": 0.5411643981933594,
+     "eval_runtime": 276.8379,
+     "eval_samples": 504,
+     "eval_samples_per_second": 1.821,
+     "eval_steps_per_second": 0.91,
+     "eval_wer": 0.4413347685683531
+ }
runs/Dec26_16-52-58_srvrocgpu011.uct.ac.za/events.out.tfevents.1735230876.srvrocgpu011.uct.ac.za ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19589e6f0a11f069f207b96d31200d0f2e1f4f8097ec302006aaa570a3252d40
+ size 40
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 3.1683168316831685,
+     "total_flos": 1.306068100153344e+19,
+     "train_loss": 2.189091945886612,
+     "train_runtime": 5496.1435,
+     "train_samples": 4039,
+     "train_samples_per_second": 7.278,
+     "train_steps_per_second": 0.91
+ }
trainer_state.json ADDED
@@ -0,0 +1,571 @@
+ {
+   "best_metric": 0.5411643981933594,
+   "best_model_checkpoint": "/scratch/skscla001/speech/results/whisper-medium-bemgen-balanced-model/checkpoint-1000",
+   "epoch": 3.1683168316831685,
+   "eval_steps": 200,
+   "global_step": 1600,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.04950495049504951,
+       "grad_norm": 121.81847381591797,
+       "learning_rate": 4.0000000000000003e-07,
+       "loss": 11.6499,
+       "step": 25
+     },
+     {
+       "epoch": 0.09900990099009901,
+       "grad_norm": 104.46240234375,
+       "learning_rate": 9.000000000000001e-07,
+       "loss": 9.6493,
+       "step": 50
+     },
+     {
+       "epoch": 0.1485148514851485,
+       "grad_norm": 105.9942626953125,
+       "learning_rate": 1.3800000000000001e-06,
+       "loss": 7.7745,
+       "step": 75
+     },
+     {
+       "epoch": 0.19801980198019803,
+       "grad_norm": 86.44505310058594,
+       "learning_rate": 1.8800000000000002e-06,
+       "loss": 6.0363,
+       "step": 100
+     },
+     {
+       "epoch": 0.24752475247524752,
+       "grad_norm": 89.36949920654297,
+       "learning_rate": 2.38e-06,
+       "loss": 4.9289,
+       "step": 125
+     },
+     {
+       "epoch": 0.297029702970297,
+       "grad_norm": 67.95636749267578,
+       "learning_rate": 2.88e-06,
+       "loss": 4.2784,
+       "step": 150
+     },
+     {
+       "epoch": 0.3465346534653465,
+       "grad_norm": 71.39818572998047,
+       "learning_rate": 3.3800000000000007e-06,
+       "loss": 3.8983,
+       "step": 175
+     },
+     {
+       "epoch": 0.39603960396039606,
+       "grad_norm": 77.7619400024414,
+       "learning_rate": 3.88e-06,
+       "loss": 3.7045,
+       "step": 200
+     },
+     {
+       "epoch": 0.39603960396039606,
+       "eval_loss": 0.9162073135375977,
+       "eval_runtime": 279.8105,
+       "eval_samples_per_second": 1.801,
+       "eval_steps_per_second": 0.901,
+       "eval_wer": 0.6826695371367061,
+       "step": 200
+     },
+     {
+       "epoch": 0.44554455445544555,
+       "grad_norm": 65.45413208007812,
+       "learning_rate": 4.38e-06,
+       "loss": 3.4515,
+       "step": 225
+     },
+     {
+       "epoch": 0.49504950495049505,
+       "grad_norm": 83.95701599121094,
+       "learning_rate": 4.880000000000001e-06,
+       "loss": 3.3691,
+       "step": 250
+     },
+     {
+       "epoch": 0.5445544554455446,
+       "grad_norm": 60.99998092651367,
+       "learning_rate": 5.380000000000001e-06,
+       "loss": 3.1637,
+       "step": 275
+     },
+     {
+       "epoch": 0.594059405940594,
+       "grad_norm": 86.96224212646484,
+       "learning_rate": 5.8800000000000005e-06,
+       "loss": 3.015,
+       "step": 300
+     },
+     {
+       "epoch": 0.6435643564356436,
+       "grad_norm": 75.70954132080078,
+       "learning_rate": 6.380000000000001e-06,
+       "loss": 2.9369,
+       "step": 325
+     },
+     {
+       "epoch": 0.693069306930693,
+       "grad_norm": 68.08271026611328,
+       "learning_rate": 6.88e-06,
+       "loss": 2.7672,
+       "step": 350
+     },
+     {
+       "epoch": 0.7425742574257426,
+       "grad_norm": 59.94082260131836,
+       "learning_rate": 7.3800000000000005e-06,
+       "loss": 2.7812,
+       "step": 375
+     },
+     {
+       "epoch": 0.7920792079207921,
+       "grad_norm": 63.15815353393555,
+       "learning_rate": 7.88e-06,
+       "loss": 2.687,
+       "step": 400
+     },
+     {
+       "epoch": 0.7920792079207921,
+       "eval_loss": 0.6817564368247986,
+       "eval_runtime": 278.5541,
+       "eval_samples_per_second": 1.809,
+       "eval_steps_per_second": 0.905,
+       "eval_wer": 0.5351991388589882,
+       "step": 400
+     },
+     {
+       "epoch": 0.8415841584158416,
+       "grad_norm": 66.49015808105469,
+       "learning_rate": 8.380000000000001e-06,
+       "loss": 2.6355,
+       "step": 425
+     },
+     {
+       "epoch": 0.8910891089108911,
+       "grad_norm": 69.2128677368164,
+       "learning_rate": 8.880000000000001e-06,
+       "loss": 2.5594,
+       "step": 450
+     },
+     {
+       "epoch": 0.9405940594059405,
+       "grad_norm": 82.29959106445312,
+       "learning_rate": 9.38e-06,
+       "loss": 2.6779,
+       "step": 475
+     },
+     {
+       "epoch": 0.9900990099009901,
+       "grad_norm": 75.67899322509766,
+       "learning_rate": 9.88e-06,
+       "loss": 2.6395,
+       "step": 500
+     },
+     {
+       "epoch": 1.0396039603960396,
+       "grad_norm": 74.15483093261719,
+       "learning_rate": 9.957777777777779e-06,
+       "loss": 1.9931,
+       "step": 525
+     },
+     {
+       "epoch": 1.0891089108910892,
+       "grad_norm": 49.705169677734375,
+       "learning_rate": 9.902222222222223e-06,
+       "loss": 1.7258,
+       "step": 550
+     },
+     {
+       "epoch": 1.1386138613861387,
+       "grad_norm": 37.47819519042969,
+       "learning_rate": 9.846666666666668e-06,
+       "loss": 1.8517,
+       "step": 575
+     },
+     {
+       "epoch": 1.188118811881188,
+       "grad_norm": 50.20145034790039,
+       "learning_rate": 9.791111111111112e-06,
+       "loss": 1.7185,
+       "step": 600
+     },
+     {
+       "epoch": 1.188118811881188,
+       "eval_loss": 0.6266342401504517,
+       "eval_runtime": 282.9063,
+       "eval_samples_per_second": 1.782,
+       "eval_steps_per_second": 0.891,
+       "eval_wer": 0.4988159311087191,
+       "step": 600
+     },
+     {
+       "epoch": 1.2376237623762376,
+       "grad_norm": 77.02053833007812,
+       "learning_rate": 9.735555555555556e-06,
+       "loss": 1.8482,
+       "step": 625
+     },
+     {
+       "epoch": 1.2871287128712872,
+       "grad_norm": 73.36985778808594,
+       "learning_rate": 9.68e-06,
+       "loss": 1.834,
+       "step": 650
+     },
+     {
+       "epoch": 1.3366336633663367,
+       "grad_norm": 48.02408981323242,
+       "learning_rate": 9.624444444444445e-06,
+       "loss": 1.6974,
+       "step": 675
+     },
+     {
+       "epoch": 1.386138613861386,
+       "grad_norm": 54.08694076538086,
+       "learning_rate": 9.56888888888889e-06,
+       "loss": 1.7489,
+       "step": 700
+     },
+     {
+       "epoch": 1.4356435643564356,
+       "grad_norm": 39.281005859375,
+       "learning_rate": 9.513333333333334e-06,
+       "loss": 1.8013,
+       "step": 725
+     },
+     {
+       "epoch": 1.4851485148514851,
+       "grad_norm": 43.782196044921875,
+       "learning_rate": 9.457777777777778e-06,
+       "loss": 1.7419,
+       "step": 750
+     },
+     {
+       "epoch": 1.5346534653465347,
+       "grad_norm": 40.640323638916016,
+       "learning_rate": 9.402222222222222e-06,
+       "loss": 1.7863,
+       "step": 775
+     },
+     {
+       "epoch": 1.5841584158415842,
+       "grad_norm": 43.944881439208984,
+       "learning_rate": 9.346666666666666e-06,
+       "loss": 1.7232,
+       "step": 800
+     },
+     {
+       "epoch": 1.5841584158415842,
+       "eval_loss": 0.5673945546150208,
+       "eval_runtime": 279.2797,
+       "eval_samples_per_second": 1.805,
+       "eval_steps_per_second": 0.902,
+       "eval_wer": 0.4592034445640474,
+       "step": 800
+     },
+     {
+       "epoch": 1.6336633663366338,
+       "grad_norm": 53.22232437133789,
+       "learning_rate": 9.291111111111112e-06,
+       "loss": 1.6903,
+       "step": 825
+     },
+     {
+       "epoch": 1.6831683168316833,
+       "grad_norm": 41.854610443115234,
+       "learning_rate": 9.235555555555556e-06,
+       "loss": 1.656,
+       "step": 850
+     },
+     {
+       "epoch": 1.7326732673267327,
+       "grad_norm": 38.988529205322266,
+       "learning_rate": 9.180000000000002e-06,
+       "loss": 1.5997,
+       "step": 875
+     },
+     {
+       "epoch": 1.7821782178217822,
+       "grad_norm": 54.63161087036133,
+       "learning_rate": 9.124444444444444e-06,
+       "loss": 1.6233,
+       "step": 900
+     },
+     {
+       "epoch": 1.8316831683168315,
+       "grad_norm": 41.60354232788086,
+       "learning_rate": 9.06888888888889e-06,
+       "loss": 1.4889,
+       "step": 925
+     },
+     {
+       "epoch": 1.881188118811881,
+       "grad_norm": 42.82672119140625,
+       "learning_rate": 9.013333333333334e-06,
+       "loss": 1.6465,
+       "step": 950
+     },
+     {
+       "epoch": 1.9306930693069306,
+       "grad_norm": 50.5968132019043,
+       "learning_rate": 8.957777777777778e-06,
+       "loss": 1.6738,
+       "step": 975
+     },
+     {
+       "epoch": 1.9801980198019802,
+       "grad_norm": 46.50210952758789,
+       "learning_rate": 8.902222222222224e-06,
+       "loss": 1.6083,
+       "step": 1000
+     },
+     {
+       "epoch": 1.9801980198019802,
+       "eval_loss": 0.5411643981933594,
+       "eval_runtime": 277.778,
+       "eval_samples_per_second": 1.814,
+       "eval_steps_per_second": 0.907,
+       "eval_wer": 0.4413347685683531,
+       "step": 1000
+     },
+     {
+       "epoch": 2.0297029702970297,
+       "grad_norm": 25.421289443969727,
+       "learning_rate": 8.846666666666668e-06,
+       "loss": 1.1495,
+       "step": 1025
+     },
+     {
+       "epoch": 2.0792079207920793,
+       "grad_norm": 34.11124038696289,
+       "learning_rate": 8.791111111111112e-06,
+       "loss": 0.8415,
+       "step": 1050
+     },
+     {
+       "epoch": 2.128712871287129,
+       "grad_norm": 28.867008209228516,
+       "learning_rate": 8.735555555555556e-06,
+       "loss": 0.7942,
+       "step": 1075
+     },
+     {
+       "epoch": 2.1782178217821784,
+       "grad_norm": 28.74831199645996,
+       "learning_rate": 8.68e-06,
+       "loss": 0.894,
+       "step": 1100
+     },
+     {
+       "epoch": 2.227722772277228,
+       "grad_norm": 31.9539794921875,
+       "learning_rate": 8.624444444444446e-06,
+       "loss": 0.9315,
+       "step": 1125
+     },
+     {
+       "epoch": 2.2772277227722775,
+       "grad_norm": 25.5980224609375,
+       "learning_rate": 8.56888888888889e-06,
+       "loss": 0.8735,
+       "step": 1150
+     },
+     {
+       "epoch": 2.3267326732673266,
+       "grad_norm": 30.295211791992188,
+       "learning_rate": 8.513333333333335e-06,
+       "loss": 0.7829,
+       "step": 1175
+     },
+     {
+       "epoch": 2.376237623762376,
+       "grad_norm": 38.251556396484375,
+       "learning_rate": 8.457777777777778e-06,
+       "loss": 0.7643,
+       "step": 1200
+     },
+     {
+       "epoch": 2.376237623762376,
+       "eval_loss": 0.565162181854248,
+       "eval_runtime": 302.1807,
+       "eval_samples_per_second": 1.668,
+       "eval_steps_per_second": 0.834,
+       "eval_wer": 0.4279870828848224,
+       "step": 1200
+     },
+     {
+       "epoch": 2.4257425742574257,
+       "grad_norm": 22.155136108398438,
+       "learning_rate": 8.402222222222223e-06,
+       "loss": 0.8875,
+       "step": 1225
+     },
+     {
+       "epoch": 2.4752475247524752,
+       "grad_norm": 29.48054313659668,
+       "learning_rate": 8.346666666666668e-06,
+       "loss": 0.8138,
+       "step": 1250
+     },
+     {
+       "epoch": 2.5247524752475248,
+       "grad_norm": 31.146007537841797,
+       "learning_rate": 8.291111111111112e-06,
+       "loss": 0.8036,
+       "step": 1275
+     },
+     {
+       "epoch": 2.5742574257425743,
+       "grad_norm": 39.311256408691406,
+       "learning_rate": 8.235555555555557e-06,
+       "loss": 0.8389,
+       "step": 1300
+     },
+     {
+       "epoch": 2.623762376237624,
+       "grad_norm": 21.874267578125,
+       "learning_rate": 8.18e-06,
+       "loss": 0.8243,
+       "step": 1325
+     },
+     {
+       "epoch": 2.6732673267326734,
+       "grad_norm": 27.84779167175293,
+       "learning_rate": 8.124444444444445e-06,
+       "loss": 1.0702,
+       "step": 1350
+     },
+     {
+       "epoch": 2.7227722772277225,
+       "grad_norm": 41.43334197998047,
+       "learning_rate": 8.06888888888889e-06,
+       "loss": 0.8892,
+       "step": 1375
+     },
+     {
+       "epoch": 2.772277227722772,
+       "grad_norm": 36.145965576171875,
+       "learning_rate": 8.013333333333333e-06,
+       "loss": 0.8362,
+       "step": 1400
+     },
+     {
+       "epoch": 2.772277227722772,
+       "eval_loss": 0.5454630851745605,
+       "eval_runtime": 277.6054,
+       "eval_samples_per_second": 1.816,
+       "eval_steps_per_second": 0.908,
+       "eval_wer": 0.40516684607104414,
+       "step": 1400
+     },
+     {
+       "epoch": 2.8217821782178216,
+       "grad_norm": 82.11780548095703,
+       "learning_rate": 7.957777777777779e-06,
+       "loss": 0.7491,
+       "step": 1425
+     },
+     {
+       "epoch": 2.871287128712871,
+       "grad_norm": 28.929828643798828,
+       "learning_rate": 7.902222222222223e-06,
+       "loss": 0.9076,
+       "step": 1450
+     },
+     {
+       "epoch": 2.9207920792079207,
+       "grad_norm": 29.58544921875,
+       "learning_rate": 7.846666666666667e-06,
+       "loss": 0.8593,
+       "step": 1475
+     },
+     {
+       "epoch": 2.9702970297029703,
+       "grad_norm": 36.51881790161133,
+       "learning_rate": 7.791111111111111e-06,
+       "loss": 0.7739,
+       "step": 1500
+     },
+     {
+       "epoch": 3.01980198019802,
+       "grad_norm": 13.50954532623291,
+       "learning_rate": 7.735555555555557e-06,
+       "loss": 0.6666,
+       "step": 1525
+     },
+     {
+       "epoch": 3.0693069306930694,
+       "grad_norm": 15.712291717529297,
+       "learning_rate": 7.680000000000001e-06,
+       "loss": 0.3365,
+       "step": 1550
+     },
+     {
+       "epoch": 3.118811881188119,
+       "grad_norm": 22.804317474365234,
+       "learning_rate": 7.624444444444445e-06,
+       "loss": 0.3306,
+       "step": 1575
+     },
+     {
+       "epoch": 3.1683168316831685,
+       "grad_norm": 22.823379516601562,
+       "learning_rate": 7.56888888888889e-06,
+       "loss": 0.422,
+       "step": 1600
+     },
+     {
+       "epoch": 3.1683168316831685,
+       "eval_loss": 0.577109694480896,
+       "eval_runtime": 282.3788,
+       "eval_samples_per_second": 1.785,
+       "eval_steps_per_second": 0.892,
+       "eval_wer": 0.3991388589881593,
+       "step": 1600
+     },
+     {
+       "epoch": 3.1683168316831685,
+       "step": 1600,
+       "total_flos": 1.306068100153344e+19,
+       "train_loss": 2.189091945886612,
+       "train_runtime": 5496.1435,
+       "train_samples_per_second": 7.278,
+       "train_steps_per_second": 0.91
+     }
+   ],
+   "logging_steps": 25,
+   "max_steps": 5000,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 200,
+   "stateful_callbacks": {
+     "EarlyStoppingCallback": {
+       "args": {
+         "early_stopping_patience": 3,
+         "early_stopping_threshold": 0.0
+       },
+       "attributes": {
+         "early_stopping_patience_counter": 3
+       }
+     },
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.306068100153344e+19,
+   "train_batch_size": 2,
+   "trial_name": null,
+   "trial_params": null
+ }
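Since trainer_state.json is plain JSON, the evaluation trajectory logged above can be pulled out programmatically. A small sketch, assuming the file from this commit has been downloaded locally:

```python
import json

# Assumes trainer_state.json from this commit is in the working directory.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training and evaluation records; keep the eval ones.
for entry in state["log_history"]:
    if "eval_wer" in entry:
        print(f"step {entry['step']:>4}: eval_loss {entry['eval_loss']:.4f}, "
              f"WER {entry['eval_wer']:.4f}")

# The best checkpoint (lowest eval_loss, step 1000 here) is recorded directly.
print(state["best_metric"], state["best_model_checkpoint"])
```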