{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 355,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "dpo_losses": 0.6931471824645996,
      "epoch": 0.0,
      "grad_norm": 1.6020024881522184,
      "learning_rate": 1.3888888888888887e-08,
      "logits/chosen": -2.861618995666504,
      "logits/rejected": -2.8205904960632324,
      "logps/chosen": -271.06011962890625,
      "logps/rejected": -211.1704559326172,
      "loss": 0.6931,
      "positive_losses": 0.0,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/margins_max": 0.0,
      "rewards/margins_min": 0.0,
      "rewards/margins_std": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "dpo_losses": 0.6933339834213257,
      "epoch": 0.03,
      "grad_norm": 17.749396515379228,
      "learning_rate": 1.3888888888888888e-07,
      "logits/chosen": -2.8337996006011963,
      "logits/rejected": -2.7912583351135254,
      "logps/chosen": -325.06781005859375,
      "logps/rejected": -274.9460754394531,
      "loss": 0.6995,
      "positive_losses": 0.07389768213033676,
      "rewards/accuracies": 0.3888888955116272,
      "rewards/chosen": -0.00017488877347204834,
      "rewards/margins": -0.0003703224065247923,
      "rewards/margins_max": 0.002196074463427067,
      "rewards/margins_min": -0.003493131836876273,
      "rewards/margins_std": 0.0025361739099025726,
      "rewards/rejected": 0.00019543366215657443,
      "step": 10
    },
    {
      "dpo_losses": 0.6930112838745117,
      "epoch": 0.06,
      "grad_norm": 16.42860549070251,
      "learning_rate": 2.7777777777777776e-07,
      "logits/chosen": -2.7250771522521973,
      "logits/rejected": -2.7066714763641357,
      "logps/chosen": -293.7167663574219,
      "logps/rejected": -215.6971893310547,
      "loss": 0.6969,
      "positive_losses": 0.0511661060154438,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.0010916258906945586,
      "rewards/margins": 0.00027377792866900563,
      "rewards/margins_max": 0.0035606161691248417,
      "rewards/margins_min": -0.0026067704893648624,
      "rewards/margins_std": 0.002710341941565275,
      "rewards/rejected": 0.0008178479038178921,
      "step": 20
    },
    {
      "dpo_losses": 0.692296028137207,
      "epoch": 0.08,
      "grad_norm": 8.629940663439823,
      "learning_rate": 4.1666666666666667e-07,
      "logits/chosen": -2.8196403980255127,
      "logits/rejected": -2.750633716583252,
      "logps/chosen": -303.18206787109375,
      "logps/rejected": -232.16091918945312,
      "loss": 0.6932,
      "positive_losses": 0.0034084320068359375,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.005238121375441551,
      "rewards/margins": 0.0017049547750502825,
      "rewards/margins_max": 0.0041273366659879684,
      "rewards/margins_min": -0.0010792845860123634,
      "rewards/margins_std": 0.0023812309373170137,
      "rewards/rejected": 0.003533167066052556,
      "step": 30
    },
    {
      "dpo_losses": 0.6920384764671326,
      "epoch": 0.11,
      "grad_norm": 1.7378063453273105,
      "learning_rate": 4.998060489154965e-07,
      "logits/chosen": -2.8426527976989746,
      "logits/rejected": -2.7624073028564453,
      "logps/chosen": -276.6007385253906,
      "logps/rejected": -225.0730438232422,
      "loss": 0.6922,
      "positive_losses": 0.0007575989002361894,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.008838965557515621,
      "rewards/margins": 0.0022226297296583652,
      "rewards/margins_max": 0.006442135665565729,
      "rewards/margins_min": -0.0014907930744811893,
      "rewards/margins_std": 0.0035486381966620684,
      "rewards/rejected": 0.006616336293518543,
      "step": 40
    },
    {
      "dpo_losses": 0.6905555725097656,
      "epoch": 0.14,
      "grad_norm": 9.354247049667382,
      "learning_rate": 4.976275538042932e-07,
      "logits/chosen": -2.810286283493042,
      "logits/rejected": -2.7382943630218506,
      "logps/chosen": -274.2152404785156,
      "logps/rejected": -233.04257202148438,
      "loss": 0.6905,
      "positive_losses": 0.0,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.013549095019698143,
      "rewards/margins": 0.005201784893870354,
      "rewards/margins_max": 0.0127005185931921,
      "rewards/margins_min": -0.0008777756011113524,
      "rewards/margins_std": 0.006243027281016111,
      "rewards/rejected": 0.008347309194505215,
      "step": 50
    },
    {
      "dpo_losses": 0.6887227892875671,
      "epoch": 0.17,
      "grad_norm": 2.4332730738698984,
      "learning_rate": 4.930493069997119e-07,
      "logits/chosen": -2.749894142150879,
      "logits/rejected": -2.7108871936798096,
      "logps/chosen": -311.87884521484375,
      "logps/rejected": -261.8995361328125,
      "loss": 0.6896,
      "positive_losses": 0.0,
      "rewards/accuracies": 0.875,
      "rewards/chosen": 0.017607735469937325,
      "rewards/margins": 0.008888162672519684,
      "rewards/margins_max": 0.017459740862250328,
      "rewards/margins_min": 0.0007526646950282156,
      "rewards/margins_std": 0.0074041737243533134,
      "rewards/rejected": 0.008719570934772491,
      "step": 60
    },
    {
      "dpo_losses": 0.6875914931297302,
      "epoch": 0.2,
      "grad_norm": 1.7936785587345936,
      "learning_rate": 4.861156761634013e-07,
      "logits/chosen": -2.7838871479034424,
      "logits/rejected": -2.7218618392944336,
      "logps/chosen": -320.61077880859375,
      "logps/rejected": -234.509033203125,
      "loss": 0.6877,
      "positive_losses": 0.0053993226028978825,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": 0.022504538297653198,
      "rewards/margins": 0.01117872167378664,
      "rewards/margins_max": 0.024569755420088768,
      "rewards/margins_min": 0.00035735839628614485,
      "rewards/margins_std": 0.0108140017837286,
      "rewards/rejected": 0.011325814761221409,
      "step": 70
    },
    {
      "dpo_losses": 0.6856142282485962,
      "epoch": 0.23,
      "grad_norm": 1.8616136810198376,
      "learning_rate": 4.768938549177392e-07,
      "logits/chosen": -2.8332719802856445,
      "logits/rejected": -2.7764153480529785,
      "logps/chosen": -318.5867614746094,
      "logps/rejected": -285.2149963378906,
      "loss": 0.6869,
      "positive_losses": 0.006173324771225452,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.026837289333343506,
      "rewards/margins": 0.015200009569525719,
      "rewards/margins_max": 0.03601212427020073,
      "rewards/margins_min": -0.00037241075187921524,
      "rewards/margins_std": 0.01664547622203827,
      "rewards/rejected": 0.011637277901172638,
      "step": 80
    },
    {
      "dpo_losses": 0.6834368705749512,
      "epoch": 0.25,
      "grad_norm": 2.576979966636063,
      "learning_rate": 4.654732116743193e-07,
      "logits/chosen": -2.7666513919830322,
      "logits/rejected": -2.724118709564209,
      "logps/chosen": -272.710693359375,
      "logps/rejected": -197.66629028320312,
      "loss": 0.6833,
      "positive_losses": 0.0,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": 0.03284075856208801,
      "rewards/margins": 0.01963154971599579,
      "rewards/margins_max": 0.04466867819428444,
      "rewards/margins_min": 0.0018520345911383629,
      "rewards/margins_std": 0.020031433552503586,
      "rewards/rejected": 0.013209208846092224,
      "step": 90
    },
    {
      "dpo_losses": 0.6846113801002502,
      "epoch": 0.28,
      "grad_norm": 1.9012962136053702,
      "learning_rate": 4.519644235671752e-07,
      "logits/chosen": -2.8058769702911377,
      "logits/rejected": -2.7677180767059326,
      "logps/chosen": -278.3069763183594,
      "logps/rejected": -260.80572509765625,
      "loss": 0.6834,
      "positive_losses": 0.00875930767506361,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": 0.031047578901052475,
      "rewards/margins": 0.01723078265786171,
      "rewards/margins_max": 0.03883099928498268,
      "rewards/margins_min": 0.00033125150366686285,
      "rewards/margins_std": 0.017453256994485855,
      "rewards/rejected": 0.013816798105835915,
      "step": 100
    },
    {
      "epoch": 0.28,
      "eval_dpo_losses": 0.6905006766319275,
      "eval_logits/chosen": -2.804429292678833,
      "eval_logits/rejected": -2.7657294273376465,
      "eval_logps/chosen": -282.1181335449219,
      "eval_logps/rejected": -256.64227294921875,
      "eval_loss": 0.6931570768356323,
      "eval_positive_losses": 0.02003120444715023,
      "eval_rewards/accuracies": 0.5929999947547913,
      "eval_rewards/chosen": 0.024752924218773842,
      "eval_rewards/margins": 0.005386360455304384,
      "eval_rewards/margins_max": 0.03411807864904404,
      "eval_rewards/margins_min": -0.019733905792236328,
      "eval_rewards/margins_std": 0.017771249637007713,
      "eval_rewards/rejected": 0.019366566091775894,
      "eval_runtime": 428.432,
      "eval_samples_per_second": 4.668,
      "eval_steps_per_second": 0.292,
      "step": 100
    },
    {
      "dpo_losses": 0.676801860332489,
      "epoch": 0.31,
      "grad_norm": 2.3464200895802714,
      "learning_rate": 4.364984038837727e-07,
      "logits/chosen": -2.87202787399292,
      "logits/rejected": -2.7757174968719482,
      "logps/chosen": -369.87457275390625,
      "logps/rejected": -285.02069091796875,
      "loss": 0.6771,
      "positive_losses": 0.0,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 0.0473114475607872,
      "rewards/margins": 0.03318057209253311,
      "rewards/margins_max": 0.06624362617731094,
      "rewards/margins_min": 0.005891180131584406,
      "rewards/margins_std": 0.026871800422668457,
      "rewards/rejected": 0.014130881056189537,
      "step": 110
    },
    {
      "dpo_losses": 0.6757510900497437,
      "epoch": 0.34,
      "grad_norm": 2.0654592381160293,
      "learning_rate": 4.1922503338800447e-07,
      "logits/chosen": -2.8409907817840576,
      "logits/rejected": -2.7864270210266113,
      "logps/chosen": -343.3132629394531,
      "logps/rejected": -264.656982421875,
      "loss": 0.6761,
      "positive_losses": 0.00038242340087890625,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 0.05072961002588272,
      "rewards/margins": 0.035357214510440826,
      "rewards/margins_max": 0.0718097984790802,
      "rewards/margins_min": 0.007976613938808441,
      "rewards/margins_std": 0.029289904981851578,
      "rewards/rejected": 0.015372401103377342,
      "step": 120
    },
    {
      "dpo_losses": 0.672697901725769,
      "epoch": 0.37,
      "grad_norm": 1.805644520581111,
      "learning_rate": 4.003117078299021e-07,
      "logits/chosen": -2.833576202392578,
      "logits/rejected": -2.7511653900146484,
      "logps/chosen": -376.5670471191406,
      "logps/rejected": -298.7974548339844,
      "loss": 0.6728,
      "positive_losses": 0.007812881842255592,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": 0.05797192454338074,
      "rewards/margins": 0.041637517511844635,
      "rewards/margins_max": 0.07995648682117462,
      "rewards/margins_min": 0.005986917298287153,
      "rewards/margins_std": 0.03285466879606247,
      "rewards/rejected": 0.016334403306245804,
      "step": 130
    },
    {
      "dpo_losses": 0.6723438501358032,
      "epoch": 0.39,
      "grad_norm": 1.4837668528318801,
      "learning_rate": 3.799417157181075e-07,
      "logits/chosen": -2.7876131534576416,
      "logits/rejected": -2.748243808746338,
      "logps/chosen": -312.4587707519531,
      "logps/rejected": -268.16253662109375,
      "loss": 0.6749,
      "positive_losses": 0.0,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 0.059552956372499466,
      "rewards/margins": 0.04268346354365349,
      "rewards/margins_max": 0.09346815198659897,
      "rewards/margins_min": 0.005906702019274235,
      "rewards/margins_std": 0.04082999378442764,
      "rewards/rejected": 0.01686949096620083,
      "step": 140
    },
    {
      "dpo_losses": 0.6723035573959351,
      "epoch": 0.42,
      "grad_norm": 1.9734696023947083,
      "learning_rate": 3.583124620760659e-07,
      "logits/chosen": -2.7900147438049316,
      "logits/rejected": -2.7500596046447754,
      "logps/chosen": -282.7308654785156,
      "logps/rejected": -211.3845672607422,
      "loss": 0.6711,
      "positive_losses": 0.0,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 0.05954126641154289,
      "rewards/margins": 0.04255921393632889,
      "rewards/margins_max": 0.08835546672344208,
      "rewards/margins_min": 0.005340501666069031,
      "rewards/margins_std": 0.037935130298137665,
      "rewards/rejected": 0.016982052475214005,
      "step": 150
    },
    {
      "dpo_losses": 0.669468343257904,
      "epoch": 0.45,
      "grad_norm": 1.6641482478870189,
      "learning_rate": 3.356335553954679e-07,
      "logits/chosen": -2.768503189086914,
      "logits/rejected": -2.7139506340026855,
      "logps/chosen": -293.73223876953125,
      "logps/rejected": -232.4127655029297,
      "loss": 0.6692,
      "positive_losses": 0.010938262566924095,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 0.06814642250537872,
      "rewards/margins": 0.04834110662341118,
      "rewards/margins_max": 0.09320978820323944,
      "rewards/margins_min": 0.00712673831731081,
      "rewards/margins_std": 0.038901425898075104,
      "rewards/rejected": 0.019805317744612694,
      "step": 160
    },
    {
      "dpo_losses": 0.6628905534744263,
      "epoch": 0.48,
      "grad_norm": 1.8761085412198597,
      "learning_rate": 3.121247763262235e-07,
      "logits/chosen": -2.8206450939178467,
      "logits/rejected": -2.762035846710205,
      "logps/chosen": -321.90386962890625,
      "logps/rejected": -294.7594299316406,
      "loss": 0.6649,
      "positive_losses": 0.0,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": 0.07717735320329666,
      "rewards/margins": 0.062114737927913666,
      "rewards/margins_max": 0.11694135516881943,
      "rewards/margins_min": 0.011941976845264435,
      "rewards/margins_std": 0.0472017265856266,
      "rewards/rejected": 0.01506261806935072,
      "step": 170
    },
    {
      "dpo_losses": 0.6624481678009033,
      "epoch": 0.51,
      "grad_norm": 1.9655992636640798,
      "learning_rate": 2.880139477883347e-07,
      "logits/chosen": -2.8146770000457764,
      "logits/rejected": -2.726132869720459,
      "logps/chosen": -323.855712890625,
      "logps/rejected": -292.4509582519531,
      "loss": 0.6619,
      "positive_losses": 0.0,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 0.08045096695423126,
      "rewards/margins": 0.06311116367578506,
      "rewards/margins_max": 0.1282820701599121,
      "rewards/margins_min": 0.017489684745669365,
      "rewards/margins_std": 0.049531489610672,
      "rewards/rejected": 0.0173397995531559,
      "step": 180
    },
    {
      "dpo_losses": 0.6590775847434998,
      "epoch": 0.54,
      "grad_norm": 2.3566449266948037,
      "learning_rate": 2.635347271463544e-07,
      "logits/chosen": -2.776195526123047,
      "logits/rejected": -2.683864116668701,
      "logps/chosen": -313.2172546386719,
      "logps/rejected": -235.8129119873047,
      "loss": 0.6608,
      "positive_losses": 0.006581115536391735,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": 0.0874362364411354,
      "rewards/margins": 0.07068866491317749,
      "rewards/margins_max": 0.1461794078350067,
      "rewards/margins_min": 0.012810202315449715,
      "rewards/margins_std": 0.06181079149246216,
      "rewards/rejected": 0.01674756594002247,
      "step": 190
    },
    {
      "dpo_losses": 0.659989058971405,
      "epoch": 0.56,
      "grad_norm": 8.082919972131291,
      "learning_rate": 2.3892434184240534e-07,
      "logits/chosen": -2.851855516433716,
      "logits/rejected": -2.7749085426330566,
      "logps/chosen": -330.7215881347656,
      "logps/rejected": -263.6206359863281,
      "loss": 0.6629,
      "positive_losses": 0.010635947808623314,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": 0.08811243623495102,
      "rewards/margins": 0.06834487617015839,
      "rewards/margins_max": 0.13839691877365112,
      "rewards/margins_min": 0.01415681280195713,
      "rewards/margins_std": 0.05431779474020004,
      "rewards/rejected": 0.01976756379008293,
      "step": 200
    },
    {
      "epoch": 0.56,
      "eval_dpo_losses": 0.6859865784645081,
      "eval_logits/chosen": -2.787888765335083,
      "eval_logits/rejected": -2.7491748332977295,
      "eval_logps/chosen": -279.74639892578125,
      "eval_logps/rejected": -255.2262725830078,
      "eval_loss": 0.6976820230484009,
      "eval_positive_losses": 0.10420288890600204,
      "eval_rewards/accuracies": 0.5979999899864197,
      "eval_rewards/chosen": 0.04846998676657677,
      "eval_rewards/margins": 0.01494329422712326,
      "eval_rewards/margins_max": 0.08812851458787918,
      "eval_rewards/margins_min": -0.04890156909823418,
      "eval_rewards/margins_std": 0.04564943537116051,
      "eval_rewards/rejected": 0.033526696264743805,
      "eval_runtime": 427.8302,
      "eval_samples_per_second": 4.675,
      "eval_steps_per_second": 0.292,
      "step": 200
    },
    {
      "dpo_losses": 0.6545363068580627,
      "epoch": 0.59,
      "grad_norm": 1.9636450688854714,
      "learning_rate": 2.1442129043167873e-07,
      "logits/chosen": -2.754328966140747,
      "logits/rejected": -2.7271111011505127,
      "logps/chosen": -310.51593017578125,
      "logps/rejected": -254.80386352539062,
      "loss": 0.6579,
      "positive_losses": 0.019413376227021217,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": 0.096683569252491,
      "rewards/margins": 0.08047457039356232,
      "rewards/margins_max": 0.16606834530830383,
      "rewards/margins_min": 0.009150387719273567,
      "rewards/margins_std": 0.07150334119796753,
      "rewards/rejected": 0.01620900072157383,
      "step": 210
    },
    {
      "dpo_losses": 0.6517983675003052,
      "epoch": 0.62,
      "grad_norm": 1.8542376801111211,
      "learning_rate": 1.9026303129961048e-07,
      "logits/chosen": -2.85148024559021,
      "logits/rejected": -2.75597882270813,
      "logps/chosen": -343.08294677734375,
      "logps/rejected": -272.4956359863281,
      "loss": 0.6555,
      "positive_losses": 0.0,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 0.1037331372499466,
      "rewards/margins": 0.08541466295719147,
      "rewards/margins_max": 0.1512610912322998,
      "rewards/margins_min": 0.02069752663373947,
      "rewards/margins_std": 0.05890106409788132,
      "rewards/rejected": 0.018318474292755127,
      "step": 220
    },
    {
      "dpo_losses": 0.6518235206604004,
      "epoch": 0.65,
      "grad_norm": 1.9608270986414384,
      "learning_rate": 1.6668368145931396e-07,
      "logits/chosen": -2.8535895347595215,
      "logits/rejected": -2.7599148750305176,
      "logps/chosen": -361.3349609375,
      "logps/rejected": -260.3254089355469,
      "loss": 0.6498,
      "positive_losses": 0.010654067620635033,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 0.10340724140405655,
      "rewards/margins": 0.08572699129581451,
      "rewards/margins_max": 0.16569408774375916,
      "rewards/margins_min": 0.01854148879647255,
      "rewards/margins_std": 0.06613387167453766,
      "rewards/rejected": 0.017680250108242035,
      "step": 230
    },
    {
      "dpo_losses": 0.6590827703475952,
      "epoch": 0.68,
      "grad_norm": 1.5969745701990838,
      "learning_rate": 1.4391174773015834e-07,
      "logits/chosen": -2.8000075817108154,
      "logits/rejected": -2.729769229888916,
      "logps/chosen": -316.0329284667969,
      "logps/rejected": -281.4412841796875,
      "loss": 0.6592,
      "positive_losses": 0.009823227301239967,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 0.0876411646604538,
      "rewards/margins": 0.07030560076236725,
      "rewards/margins_max": 0.14278724789619446,
      "rewards/margins_min": 0.013438734225928783,
      "rewards/margins_std": 0.05923761799931526,
      "rewards/rejected": 0.017335567623376846,
      "step": 240
    },
    {
      "dpo_losses": 0.6565552949905396,
      "epoch": 0.7,
      "grad_norm": 1.9164626662561326,
      "learning_rate": 1.2216791228457775e-07,
      "logits/chosen": -2.8003289699554443,
      "logits/rejected": -2.7232441902160645,
      "logps/chosen": -302.2171325683594,
      "logps/rejected": -249.5670928955078,
      "loss": 0.661,
      "positive_losses": 0.009924506768584251,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 0.090640127658844,
      "rewards/margins": 0.07586830854415894,
      "rewards/margins_max": 0.15460054576396942,
      "rewards/margins_min": 0.012039058841764927,
      "rewards/margins_std": 0.06554323434829712,
      "rewards/rejected": 0.014771823771297932,
      "step": 250
    },
    {
      "dpo_losses": 0.6541475057601929,
      "epoch": 0.73,
      "grad_norm": 1.7167350749240156,
      "learning_rate": 1.0166289402331391e-07,
      "logits/chosen": -2.8486251831054688,
      "logits/rejected": -2.7656264305114746,
      "logps/chosen": -282.35260009765625,
      "logps/rejected": -257.30596923828125,
      "loss": 0.6505,
      "positive_losses": 0.0,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 0.09740927070379257,
      "rewards/margins": 0.08115915954113007,
      "rewards/margins_max": 0.16602402925491333,
      "rewards/margins_min": 0.016714217141270638,
      "rewards/margins_std": 0.06957190483808517,
      "rewards/rejected": 0.016250116750597954,
      "step": 260
    },
    {
      "dpo_losses": 0.6572530269622803,
      "epoch": 0.76,
      "grad_norm": 2.017681285366355,
      "learning_rate": 8.259540650444734e-08,
      "logits/chosen": -2.7961807250976562,
      "logits/rejected": -2.743107318878174,
      "logps/chosen": -299.6520690917969,
      "logps/rejected": -260.46929931640625,
      "loss": 0.6604,
      "positive_losses": 0.046803951263427734,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": 0.09463498741388321,
      "rewards/margins": 0.07433197647333145,
      "rewards/margins_max": 0.14396773278713226,
      "rewards/margins_min": 0.020581519231200218,
      "rewards/margins_std": 0.05590103194117546,
      "rewards/rejected": 0.020303016528487206,
      "step": 270
    },
    {
      "dpo_losses": 0.655379593372345,
      "epoch": 0.79,
      "grad_norm": 1.734695130092448,
      "learning_rate": 6.515023221586721e-08,
      "logits/chosen": -2.7713265419006348,
      "logits/rejected": -2.7317543029785156,
      "logps/chosen": -293.76214599609375,
      "logps/rejected": -270.7189025878906,
      "loss": 0.6575,
      "positive_losses": 0.04788818210363388,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 0.09694816917181015,
      "rewards/margins": 0.07816567271947861,
      "rewards/margins_max": 0.14518895745277405,
      "rewards/margins_min": 0.019240325316786766,
      "rewards/margins_std": 0.057953737676143646,
      "rewards/rejected": 0.018782509490847588,
      "step": 280
    },
    {
      "dpo_losses": 0.6595104336738586,
      "epoch": 0.82,
      "grad_norm": 1.7562807996771055,
      "learning_rate": 4.949643185335287e-08,
      "logits/chosen": -2.7762978076934814,
      "logits/rejected": -2.7266323566436768,
      "logps/chosen": -278.39996337890625,
      "logps/rejected": -262.4858093261719,
      "loss": 0.6646,
      "positive_losses": 0.06580867618322372,
      "rewards/accuracies": 0.875,
      "rewards/chosen": 0.08516252040863037,
      "rewards/margins": 0.06983821839094162,
      "rewards/margins_max": 0.15399742126464844,
      "rewards/margins_min": 0.005857336334884167,
      "rewards/margins_std": 0.06812156736850739,
      "rewards/rejected": 0.015324289910495281,
      "step": 290
    },
    {
      "dpo_losses": 0.6477023363113403,
      "epoch": 0.85,
      "grad_norm": 1.9324048018602529,
      "learning_rate": 3.578570595810274e-08,
      "logits/chosen": -2.8429884910583496,
      "logits/rejected": -2.765357732772827,
      "logps/chosen": -335.2912292480469,
      "logps/rejected": -287.69110107421875,
      "loss": 0.6479,
      "positive_losses": 0.0,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": 0.1122179627418518,
      "rewards/margins": 0.09479531645774841,
      "rewards/margins_max": 0.18769413232803345,
      "rewards/margins_min": 0.015501707792282104,
      "rewards/margins_std": 0.07714836299419403,
      "rewards/rejected": 0.017422644421458244,
      "step": 300
    },
    {
      "epoch": 0.85,
      "eval_dpo_losses": 0.6845089793205261,
      "eval_logits/chosen": -2.785942554473877,
      "eval_logits/rejected": -2.7478015422821045,
      "eval_logps/chosen": -279.3167724609375,
      "eval_logps/rejected": -255.1217498779297,
      "eval_loss": 0.702374279499054,
      "eval_positive_losses": 0.15923255681991577,
      "eval_rewards/accuracies": 0.593999981880188,
      "eval_rewards/chosen": 0.05276622623205185,
      "eval_rewards/margins": 0.01819423958659172,
      "eval_rewards/margins_max": 0.10716991871595383,
      "eval_rewards/margins_min": -0.059129390865564346,
      "eval_rewards/margins_std": 0.055548474192619324,
      "eval_rewards/rejected": 0.03457198664546013,
      "eval_runtime": 428.1446,
      "eval_samples_per_second": 4.671,
      "eval_steps_per_second": 0.292,
      "step": 300
    },
    {
      "dpo_losses": 0.6494620442390442,
      "epoch": 0.87,
      "grad_norm": 1.7775417343369382,
      "learning_rate": 2.415092479103503e-08,
      "logits/chosen": -2.8399946689605713,
      "logits/rejected": -2.7444241046905518,
      "logps/chosen": -287.1294250488281,
      "logps/rejected": -212.641845703125,
      "loss": 0.6483,
      "positive_losses": 0.0,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": 0.10686449706554413,
      "rewards/margins": 0.0910436287522316,
      "rewards/margins_max": 0.18475615978240967,
      "rewards/margins_min": 0.017825862392783165,
      "rewards/margins_std": 0.07425465434789658,
      "rewards/rejected": 0.015820873901247978,
      "step": 310
    },
    {
      "dpo_losses": 0.654121994972229,
      "epoch": 0.9,
      "grad_norm": 1.8615740426477174,
      "learning_rate": 1.4704840690808656e-08,
      "logits/chosen": -2.8066587448120117,
      "logits/rejected": -2.7482457160949707,
      "logps/chosen": -298.34295654296875,
      "logps/rejected": -258.84478759765625,
      "loss": 0.6527,
      "positive_losses": 0.004542541690170765,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": 0.09930218756198883,
      "rewards/margins": 0.0810491144657135,
      "rewards/margins_max": 0.17170652747154236,
      "rewards/margins_min": 0.012719864025712013,
      "rewards/margins_std": 0.07160074263811111,
      "rewards/rejected": 0.018253061920404434,
      "step": 320
    },
    {
      "dpo_losses": 0.6466237306594849,
      "epoch": 0.93,
      "grad_norm": 1.9427490502829974,
      "learning_rate": 7.538995394063995e-09,
      "logits/chosen": -2.8800158500671387,
      "logits/rejected": -2.7991366386413574,
      "logps/chosen": -343.4425964355469,
      "logps/rejected": -265.7518005371094,
      "loss": 0.6494,
      "positive_losses": 0.0037406920455396175,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": 0.10877249389886856,
      "rewards/margins": 0.09689434617757797,
      "rewards/margins_max": 0.17881345748901367,
      "rewards/margins_min": 0.022731659933924675,
      "rewards/margins_std": 0.07121957838535309,
      "rewards/rejected": 0.011878135614097118,
      "step": 330
    },
    {
      "dpo_losses": 0.6502379179000854,
      "epoch": 0.96,
      "grad_norm": 1.916115854424015,
      "learning_rate": 2.7228329070159705e-09,
      "logits/chosen": -2.767482280731201,
      "logits/rejected": -2.7154014110565186,
      "logps/chosen": -289.31561279296875,
      "logps/rejected": -249.5873260498047,
      "loss": 0.6484,
      "positive_losses": 0.02227325364947319,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 0.10529886186122894,
      "rewards/margins": 0.08947577327489853,
      "rewards/margins_max": 0.1924385130405426,
      "rewards/margins_min": 0.021350277587771416,
      "rewards/margins_std": 0.07764019072055817,
      "rewards/rejected": 0.01582309789955616,
      "step": 340
    },
    {
      "dpo_losses": 0.6559463143348694,
      "epoch": 0.99,
      "grad_norm": 5.156600662792089,
      "learning_rate": 3.0302652553296226e-10,
      "logits/chosen": -2.750614881515503,
      "logits/rejected": -2.7069051265716553,
      "logps/chosen": -305.8713073730469,
      "logps/rejected": -285.0591735839844,
      "loss": 0.6565,
      "positive_losses": 0.04594574123620987,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 0.09087814390659332,
      "rewards/margins": 0.07695063203573227,
      "rewards/margins_max": 0.1514495462179184,
      "rewards/margins_min": 0.016952747479081154,
      "rewards/margins_std": 0.06040460616350174,
      "rewards/rejected": 0.01392750721424818,
      "step": 350
    },
    {
      "epoch": 1.0,
      "step": 355,
      "total_flos": 0.0,
      "train_loss": 0.6688590748209349,
      "train_runtime": 4310.2257,
      "train_samples_per_second": 1.317,
      "train_steps_per_second": 0.082
    }
  ],
  "logging_steps": 10,
  "max_steps": 355,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}