{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9993425378040762,
  "eval_steps": 100,
  "global_step": 570,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 288.1190594115736,
      "learning_rate": 8.771929824561403e-09,
      "logits/chosen": 110.2152099609375,
      "logits/rejected": 102.90592193603516,
      "logps/chosen": -522.5816650390625,
      "logps/rejected": -417.8726501464844,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.02,
      "grad_norm": 275.85410641713264,
      "learning_rate": 8.771929824561403e-08,
      "logits/chosen": 126.02606964111328,
      "logits/rejected": 117.09617614746094,
      "logps/chosen": -389.89422607421875,
      "logps/rejected": -354.1610412597656,
      "loss": 0.7792,
      "rewards/accuracies": 0.4652777910232544,
      "rewards/chosen": 0.04768397659063339,
      "rewards/margins": 0.08727537095546722,
      "rewards/rejected": -0.039591409265995026,
      "step": 10
    },
    {
      "epoch": 0.04,
      "grad_norm": 217.40628820586957,
      "learning_rate": 1.7543859649122805e-07,
      "logits/chosen": 129.68209838867188,
      "logits/rejected": 113.68559265136719,
      "logps/chosen": -432.5853576660156,
      "logps/rejected": -373.7879943847656,
      "loss": 0.7513,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": 0.031696923077106476,
      "rewards/margins": 0.08473679423332214,
      "rewards/rejected": -0.05303985998034477,
      "step": 20
    },
    {
      "epoch": 0.05,
      "grad_norm": 191.89370217443937,
      "learning_rate": 2.631578947368421e-07,
      "logits/chosen": 124.4021987915039,
      "logits/rejected": 125.64024353027344,
      "logps/chosen": -399.22802734375,
      "logps/rejected": -390.37579345703125,
      "loss": 0.7037,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": -0.20258919894695282,
      "rewards/margins": 0.1700224131345749,
      "rewards/rejected": -0.3726116120815277,
      "step": 30
    },
    {
      "epoch": 0.07,
      "grad_norm": 211.6351305832278,
      "learning_rate": 3.508771929824561e-07,
      "logits/chosen": 131.0064697265625,
      "logits/rejected": 123.0418930053711,
      "logps/chosen": -419.60662841796875,
      "logps/rejected": -402.06634521484375,
      "loss": 0.6797,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -0.7152727842330933,
      "rewards/margins": 0.4604054391384125,
      "rewards/rejected": -1.1756783723831177,
      "step": 40
    },
    {
      "epoch": 0.09,
      "grad_norm": 484.38349273880505,
      "learning_rate": 4.3859649122807013e-07,
      "logits/chosen": 128.71829223632812,
      "logits/rejected": 118.0994873046875,
      "logps/chosen": -448.260498046875,
      "logps/rejected": -412.1876525878906,
      "loss": 0.695,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -1.3968048095703125,
      "rewards/margins": 0.8602328300476074,
      "rewards/rejected": -2.25703763961792,
      "step": 50
    },
    {
      "epoch": 0.11,
      "grad_norm": 182.40125166565076,
      "learning_rate": 4.999578104083306e-07,
      "logits/chosen": 127.1559066772461,
      "logits/rejected": 121.3763427734375,
      "logps/chosen": -435.3233337402344,
      "logps/rejected": -415.9839782714844,
      "loss": 0.6133,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -1.5324429273605347,
      "rewards/margins": 1.0619370937347412,
      "rewards/rejected": -2.5943799018859863,
      "step": 60
    },
    {
      "epoch": 0.12,
      "grad_norm": 199.29717720826676,
      "learning_rate": 4.992081692902698e-07,
      "logits/chosen": 120.18775939941406,
      "logits/rejected": 117.95320129394531,
      "logps/chosen": -406.31915283203125,
      "logps/rejected": -413.50787353515625,
      "loss": 0.5865,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.46991199254989624,
      "rewards/margins": 1.2349997758865356,
      "rewards/rejected": -1.7049118280410767,
      "step": 70
    },
    {
      "epoch": 0.14,
      "grad_norm": 207.06449323027374,
      "learning_rate": 4.975242169652915e-07,
      "logits/chosen": 128.04473876953125,
      "logits/rejected": 130.7515869140625,
      "logps/chosen": -447.3434143066406,
      "logps/rejected": -459.81890869140625,
      "loss": 0.6004,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -2.0434231758117676,
      "rewards/margins": 1.4744064807891846,
      "rewards/rejected": -3.5178298950195312,
      "step": 80
    },
    {
      "epoch": 0.16,
      "grad_norm": 200.92304707212335,
      "learning_rate": 4.949122667718934e-07,
      "logits/chosen": 121.19368743896484,
      "logits/rejected": 121.2099380493164,
      "logps/chosen": -397.20086669921875,
      "logps/rejected": -414.5176696777344,
      "loss": 0.6301,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -2.6995744705200195,
      "rewards/margins": 1.4437432289123535,
      "rewards/rejected": -4.143317222595215,
      "step": 90
    },
    {
      "epoch": 0.18,
      "grad_norm": 215.04289608607098,
      "learning_rate": 4.913821112234773e-07,
      "logits/chosen": 116.49479675292969,
      "logits/rejected": 112.4799575805664,
      "logps/chosen": -447.57647705078125,
      "logps/rejected": -429.46575927734375,
      "loss": 0.6146,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -2.545346260070801,
      "rewards/margins": 1.2528657913208008,
      "rewards/rejected": -3.7982120513916016,
      "step": 100
    },
    {
      "epoch": 0.18,
      "eval_logits/chosen": 81.87950897216797,
      "eval_logits/rejected": 84.39910125732422,
      "eval_logps/chosen": -411.71240234375,
      "eval_logps/rejected": -420.9197082519531,
      "eval_loss": 0.4651451110839844,
      "eval_rewards/accuracies": 0.7526595592498779,
      "eval_rewards/chosen": -2.3365964889526367,
      "eval_rewards/margins": 1.8754801750183105,
      "eval_rewards/rejected": -4.2120771408081055,
      "eval_runtime": 140.5275,
      "eval_samples_per_second": 21.348,
      "eval_steps_per_second": 0.669,
      "step": 100
    },
    {
      "epoch": 0.19,
      "grad_norm": 145.22259820264708,
      "learning_rate": 4.869469852950461e-07,
      "logits/chosen": 122.2143325805664,
      "logits/rejected": 105.96075439453125,
      "logps/chosen": -432.1094665527344,
      "logps/rejected": -416.0596618652344,
      "loss": 0.5204,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -2.683460235595703,
      "rewards/margins": 1.542055606842041,
      "rewards/rejected": -4.225515842437744,
      "step": 110
    },
    {
      "epoch": 0.21,
      "grad_norm": 156.26979860678045,
      "learning_rate": 4.816235168037004e-07,
      "logits/chosen": 128.47006225585938,
      "logits/rejected": 118.56982421875,
      "logps/chosen": -449.61700439453125,
      "logps/rejected": -434.48028564453125,
      "loss": 0.6423,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -3.0041728019714355,
      "rewards/margins": 1.7394739389419556,
      "rewards/rejected": -4.74364709854126,
      "step": 120
    },
    {
      "epoch": 0.23,
      "grad_norm": 177.86144513954326,
      "learning_rate": 4.754316640689664e-07,
      "logits/chosen": 113.59197998046875,
      "logits/rejected": 112.35980224609375,
      "logps/chosen": -457.04144287109375,
      "logps/rejected": -439.47723388671875,
      "loss": 0.5826,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -2.8894424438476562,
      "rewards/margins": 1.4542138576507568,
      "rewards/rejected": -4.343656539916992,
      "step": 130
    },
    {
      "epoch": 0.25,
      "grad_norm": 189.02971648014739,
      "learning_rate": 4.683946410866696e-07,
      "logits/chosen": 116.56878662109375,
      "logits/rejected": 122.89111328125,
      "logps/chosen": -420.02703857421875,
      "logps/rejected": -411.07720947265625,
      "loss": 0.6118,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -3.1426749229431152,
      "rewards/margins": 1.2650305032730103,
      "rewards/rejected": -4.407705307006836,
      "step": 140
    },
    {
      "epoch": 0.26,
      "grad_norm": 211.31450916072063,
      "learning_rate": 4.605388304968914e-07,
      "logits/chosen": 118.91947937011719,
      "logits/rejected": 116.5951156616211,
      "logps/chosen": -431.5098571777344,
      "logps/rejected": -429.95440673828125,
      "loss": 0.5911,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -2.910405397415161,
      "rewards/margins": 1.1865342855453491,
      "rewards/rejected": -4.096939563751221,
      "step": 150
    },
    {
      "epoch": 0.28,
      "grad_norm": 170.26872451717134,
      "learning_rate": 4.518936846722982e-07,
      "logits/chosen": 117.84376525878906,
      "logits/rejected": 112.31922912597656,
      "logps/chosen": -414.76165771484375,
      "logps/rejected": -437.95452880859375,
      "loss": 0.5456,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -1.7054227590560913,
      "rewards/margins": 2.110852003097534,
      "rewards/rejected": -3.816274642944336,
      "step": 160
    },
    {
      "epoch": 0.3,
      "grad_norm": 191.46259689342833,
      "learning_rate": 4.424916152976768e-07,
      "logits/chosen": 119.23990631103516,
      "logits/rejected": 121.25250244140625,
      "logps/chosen": -443.57867431640625,
      "logps/rejected": -435.184326171875,
      "loss": 0.5512,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -1.7071870565414429,
      "rewards/margins": 1.8656721115112305,
      "rewards/rejected": -3.572859287261963,
      "step": 170
    },
    {
      "epoch": 0.32,
      "grad_norm": 184.73035643974228,
      "learning_rate": 4.323678718546552e-07,
      "logits/chosen": 108.43327331542969,
      "logits/rejected": 114.77632141113281,
      "logps/chosen": -438.0908203125,
      "logps/rejected": -462.9315490722656,
      "loss": 0.6132,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -2.3591582775115967,
      "rewards/margins": 2.4720942974090576,
      "rewards/rejected": -4.8312530517578125,
      "step": 180
    },
    {
      "epoch": 0.33,
      "grad_norm": 158.74860587248762,
      "learning_rate": 4.2156040946718343e-07,
      "logits/chosen": 123.97856140136719,
      "logits/rejected": 116.08268737792969,
      "logps/chosen": -403.24920654296875,
      "logps/rejected": -421.84698486328125,
      "loss": 0.5587,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -1.5052869319915771,
      "rewards/margins": 1.7390124797821045,
      "rewards/rejected": -3.2442994117736816,
      "step": 190
    },
    {
      "epoch": 0.35,
      "grad_norm": 183.38314527086422,
      "learning_rate": 4.1010974660323827e-07,
      "logits/chosen": 126.38175964355469,
      "logits/rejected": 122.32222747802734,
      "logps/chosen": -436.79522705078125,
      "logps/rejected": -426.5311584472656,
      "loss": 0.5464,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.6060012578964233,
      "rewards/margins": 1.8500821590423584,
      "rewards/rejected": -2.456083297729492,
      "step": 200
    },
    {
      "epoch": 0.35,
      "eval_logits/chosen": 82.9056625366211,
      "eval_logits/rejected": 84.87641143798828,
      "eval_logps/chosen": -396.1968078613281,
      "eval_logps/rejected": -410.65625,
      "eval_loss": 0.4531392455101013,
      "eval_rewards/accuracies": 0.789893627166748,
      "eval_rewards/chosen": -0.785041868686676,
      "eval_rewards/margins": 2.400688648223877,
      "eval_rewards/rejected": -3.185730218887329,
      "eval_runtime": 137.5235,
      "eval_samples_per_second": 21.814,
      "eval_steps_per_second": 0.684,
      "step": 200
    },
    {
      "epoch": 0.37,
      "grad_norm": 168.64841301750204,
      "learning_rate": 3.9805881316624503e-07,
      "logits/chosen": 130.578857421875,
      "logits/rejected": 128.09909057617188,
      "logps/chosen": -438.5977478027344,
      "logps/rejected": -418.1968688964844,
      "loss": 0.5578,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.4819152355194092,
      "rewards/margins": 1.8128970861434937,
      "rewards/rejected": -3.2948124408721924,
      "step": 210
    },
    {
      "epoch": 0.39,
      "grad_norm": 136.21776962330497,
      "learning_rate": 3.8545278954573936e-07,
      "logits/chosen": 113.0008773803711,
      "logits/rejected": 117.78938293457031,
      "logps/chosen": -455.12457275390625,
      "logps/rejected": -453.49786376953125,
      "loss": 0.5577,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -2.3700404167175293,
      "rewards/margins": 2.080824375152588,
      "rewards/rejected": -4.450865268707275,
      "step": 220
    },
    {
      "epoch": 0.4,
      "grad_norm": 168.04155893000748,
      "learning_rate": 3.7233893723068785e-07,
      "logits/chosen": 117.96110534667969,
      "logits/rejected": 112.2010726928711,
      "logps/chosen": -417.343994140625,
      "logps/rejected": -411.2234802246094,
      "loss": 0.5906,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -2.2008044719696045,
      "rewards/margins": 1.7120252847671509,
      "rewards/rejected": -3.912829875946045,
      "step": 230
    },
    {
      "epoch": 0.42,
      "grad_norm": 233.13263556391968,
      "learning_rate": 3.587664216205183e-07,
      "logits/chosen": 111.51997375488281,
      "logits/rejected": 105.1529312133789,
      "logps/chosen": -435.8033142089844,
      "logps/rejected": -414.79608154296875,
      "loss": 0.5228,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -1.822892189025879,
      "rewards/margins": 1.6441761255264282,
      "rewards/rejected": -3.4670684337615967,
      "step": 240
    },
    {
      "epoch": 0.44,
      "grad_norm": 145.93578202633324,
      "learning_rate": 3.447861276981619e-07,
      "logits/chosen": 112.54423522949219,
      "logits/rejected": 114.58158874511719,
      "logps/chosen": -433.91717529296875,
      "logps/rejected": -451.19140625,
      "loss": 0.4728,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -2.1698975563049316,
      "rewards/margins": 2.254122018814087,
      "rewards/rejected": -4.424019813537598,
      "step": 250
    },
    {
      "epoch": 0.46,
      "grad_norm": 265.64870689354916,
      "learning_rate": 3.304504692561714e-07,
      "logits/chosen": 107.4307632446289,
      "logits/rejected": 109.19493103027344,
      "logps/chosen": -384.3638916015625,
      "logps/rejected": -407.864013671875,
      "loss": 0.572,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -1.458132028579712,
      "rewards/margins": 2.150017499923706,
      "rewards/rejected": -3.608149766921997,
      "step": 260
    },
    {
      "epoch": 0.47,
      "grad_norm": 188.67031809112055,
      "learning_rate": 3.1581319239114976e-07,
      "logits/chosen": 117.85128021240234,
      "logits/rejected": 116.6944580078125,
      "logps/chosen": -415.9629821777344,
      "logps/rejected": -407.85235595703125,
      "loss": 0.5564,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.2570802569389343,
      "rewards/margins": 2.1505465507507324,
      "rewards/rejected": -2.4076266288757324,
      "step": 270
    },
    {
      "epoch": 0.49,
      "grad_norm": 165.06348521901816,
      "learning_rate": 3.0092917400321105e-07,
      "logits/chosen": 128.14698791503906,
      "logits/rejected": 121.8365478515625,
      "logps/chosen": -417.6142578125,
      "logps/rejected": -403.83367919921875,
      "loss": 0.5676,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.7323893308639526,
      "rewards/margins": 1.7381248474121094,
      "rewards/rejected": -2.4705142974853516,
      "step": 280
    },
    {
      "epoch": 0.51,
      "grad_norm": 157.00916744994808,
      "learning_rate": 2.8585421605592406e-07,
      "logits/chosen": 110.540283203125,
      "logits/rejected": 115.08995056152344,
      "logps/chosen": -396.43426513671875,
      "logps/rejected": -436.2679138183594,
      "loss": 0.5495,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.4560773968696594,
      "rewards/margins": 1.98747980594635,
      "rewards/rejected": -2.4435572624206543,
      "step": 290
    },
    {
      "epoch": 0.53,
      "grad_norm": 205.45827003907002,
      "learning_rate": 2.706448363680831e-07,
      "logits/chosen": 124.5927963256836,
      "logits/rejected": 110.01478576660156,
      "logps/chosen": -446.9676818847656,
      "logps/rejected": -425.59130859375,
      "loss": 0.5841,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -1.1815617084503174,
      "rewards/margins": 2.0572218894958496,
      "rewards/rejected": -3.238783597946167,
      "step": 300
    },
    {
      "epoch": 0.53,
      "eval_logits/chosen": 81.92237091064453,
      "eval_logits/rejected": 83.76121520996094,
      "eval_logps/chosen": -404.27252197265625,
      "eval_logps/rejected": -421.2023010253906,
      "eval_loss": 0.420937716960907,
      "eval_rewards/accuracies": 0.8085106611251831,
      "eval_rewards/chosen": -1.5926097631454468,
      "eval_rewards/margins": 2.6477227210998535,
      "eval_rewards/rejected": -4.24033260345459,
      "eval_runtime": 140.4063,
      "eval_samples_per_second": 21.367,
      "eval_steps_per_second": 0.669,
      "step": 300
    },
    {
      "epoch": 0.54,
      "grad_norm": 160.60055964808268,
      "learning_rate": 2.5535805672165076e-07,
      "logits/chosen": 137.0350799560547,
      "logits/rejected": 120.5669174194336,
      "logps/chosen": -484.946533203125,
      "logps/rejected": -442.79840087890625,
      "loss": 0.531,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -1.6490952968597412,
      "rewards/margins": 2.217057943344116,
      "rewards/rejected": -3.8661532402038574,
      "step": 310
    },
    {
      "epoch": 0.56,
      "grad_norm": 185.28720083745498,
      "learning_rate": 2.4005118908028396e-07,
      "logits/chosen": 124.60655212402344,
      "logits/rejected": 121.116455078125,
      "logps/chosen": -404.44512939453125,
      "logps/rejected": -417.16912841796875,
      "loss": 0.5466,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.48712271451950073,
      "rewards/margins": 2.2081336975097656,
      "rewards/rejected": -2.695256471633911,
      "step": 320
    },
    {
      "epoch": 0.58,
      "grad_norm": 126.54706641028886,
      "learning_rate": 2.2478162071993296e-07,
      "logits/chosen": 120.48885345458984,
      "logits/rejected": 124.61685943603516,
      "logps/chosen": -399.6452941894531,
      "logps/rejected": -414.8997497558594,
      "loss": 0.4872,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.09031596034765244,
      "rewards/margins": 2.396162509918213,
      "rewards/rejected": -2.486478567123413,
      "step": 330
    },
    {
      "epoch": 0.6,
      "grad_norm": 178.09627633035737,
      "learning_rate": 2.096065990770863e-07,
      "logits/chosen": 122.05961608886719,
      "logits/rejected": 118.81587982177734,
      "logps/chosen": -394.23077392578125,
      "logps/rejected": -430.9129943847656,
      "loss": 0.5554,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.9867365956306458,
      "rewards/margins": 1.8670978546142578,
      "rewards/rejected": -2.8538341522216797,
      "step": 340
    },
    {
      "epoch": 0.61,
      "grad_norm": 177.15865509490735,
      "learning_rate": 1.9458301712129033e-07,
      "logits/chosen": 119.64925384521484,
      "logits/rejected": 119.57633972167969,
      "logps/chosen": -411.25408935546875,
      "logps/rejected": -429.3443298339844,
      "loss": 0.5122,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -0.9517320394515991,
      "rewards/margins": 2.0737788677215576,
      "rewards/rejected": -3.0255112648010254,
      "step": 350
    },
    {
      "epoch": 0.63,
      "grad_norm": 139.47726339895294,
      "learning_rate": 1.7976720005660767e-07,
      "logits/chosen": 113.1412582397461,
      "logits/rejected": 101.12696838378906,
      "logps/chosen": -406.71917724609375,
      "logps/rejected": -391.29437255859375,
      "loss": 0.5455,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.5302667617797852,
      "rewards/margins": 2.25763201713562,
      "rewards/rejected": -2.787898540496826,
      "step": 360
    },
    {
      "epoch": 0.65,
      "grad_norm": 133.36840849054562,
      "learning_rate": 1.652146941516963e-07,
      "logits/chosen": 114.43309020996094,
      "logits/rejected": 119.9453353881836,
      "logps/chosen": -399.7528381347656,
      "logps/rejected": -420.86297607421875,
      "loss": 0.4984,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.21387052536010742,
      "rewards/margins": 2.155322313308716,
      "rewards/rejected": -2.3691928386688232,
      "step": 370
    },
    {
      "epoch": 0.67,
      "grad_norm": 149.00043137049042,
      "learning_rate": 1.5098005849021078e-07,
      "logits/chosen": 127.96968841552734,
      "logits/rejected": 125.54051208496094,
      "logps/chosen": -389.4139404296875,
      "logps/rejected": -413.8482971191406,
      "loss": 0.5179,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.6685792803764343,
      "rewards/margins": 2.1137466430664062,
      "rewards/rejected": -2.7823262214660645,
      "step": 380
    },
    {
      "epoch": 0.68,
      "grad_norm": 186.72529615905967,
      "learning_rate": 1.371166604222777e-07,
      "logits/chosen": 119.5155258178711,
      "logits/rejected": 120.4027328491211,
      "logps/chosen": -425.53814697265625,
      "logps/rejected": -438.1182556152344,
      "loss": 0.5434,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -1.0980108976364136,
      "rewards/margins": 1.9416344165802002,
      "rewards/rejected": -3.039644956588745,
      "step": 390
    },
    {
      "epoch": 0.7,
      "grad_norm": 199.20784502094995,
      "learning_rate": 1.236764754839226e-07,
      "logits/chosen": 123.8387680053711,
      "logits/rejected": 123.20845794677734,
      "logps/chosen": -452.08056640625,
      "logps/rejected": -414.00054931640625,
      "loss": 0.519,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -1.4723873138427734,
      "rewards/margins": 2.1262927055358887,
      "rewards/rejected": -3.598680019378662,
      "step": 400
    },
    {
      "epoch": 0.7,
      "eval_logits/chosen": 84.7816162109375,
      "eval_logits/rejected": 85.82012176513672,
      "eval_logps/chosen": -400.7308044433594,
      "eval_logps/rejected": -420.57318115234375,
      "eval_loss": 0.41622278094291687,
      "eval_rewards/accuracies": 0.7819148898124695,
      "eval_rewards/chosen": -1.2384368181228638,
      "eval_rewards/margins": 2.938983917236328,
      "eval_rewards/rejected": -4.1774210929870605,
      "eval_runtime": 139.5411,
      "eval_samples_per_second": 21.499,
      "eval_steps_per_second": 0.674,
      "step": 400
    },
    {
      "epoch": 0.72,
      "grad_norm": 134.84528611140695,
      "learning_rate": 1.1070989253457461e-07,
      "logits/chosen": 122.2228012084961,
      "logits/rejected": 110.46368408203125,
      "logps/chosen": -441.6568298339844,
      "logps/rejected": -433.2784118652344,
      "loss": 0.5219,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.0036917924880981,
      "rewards/margins": 2.34743595123291,
      "rewards/rejected": -3.3511276245117188,
      "step": 410
    },
    {
      "epoch": 0.74,
      "grad_norm": 133.1187583799304,
      "learning_rate": 9.826552484321085e-08,
      "logits/chosen": 132.171630859375,
      "logits/rejected": 129.95004272460938,
      "logps/chosen": -417.84796142578125,
      "logps/rejected": -423.4884338378906,
      "loss": 0.5061,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -0.618304967880249,
      "rewards/margins": 2.553462266921997,
      "rewards/rejected": -3.171767234802246,
      "step": 420
    },
    {
      "epoch": 0.75,
      "grad_norm": 573.6152142738256,
      "learning_rate": 8.639002783140181e-08,
      "logits/chosen": 118.46337890625,
      "logits/rejected": 109.05899810791016,
      "logps/chosen": -426.46795654296875,
      "logps/rejected": -418.7447814941406,
      "loss": 0.5267,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.6656720042228699,
      "rewards/margins": 2.3314335346221924,
      "rewards/rejected": -2.997105598449707,
      "step": 430
    },
    {
      "epoch": 0.77,
      "grad_norm": 148.45628734093913,
      "learning_rate": 7.512792415656055e-08,
      "logits/chosen": 119.17752838134766,
      "logits/rejected": 121.80256652832031,
      "logps/chosen": -419.00714111328125,
      "logps/rejected": -397.78277587890625,
      "loss": 0.5312,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.8773514628410339,
      "rewards/margins": 1.9682013988494873,
      "rewards/rejected": -2.845552921295166,
      "step": 440
    },
    {
      "epoch": 0.79,
      "grad_norm": 162.12056581720563,
      "learning_rate": 6.452143679117964e-08,
      "logits/chosen": 112.10150146484375,
      "logits/rejected": 113.61590576171875,
      "logps/chosen": -399.46868896484375,
      "logps/rejected": -410.13604736328125,
      "loss": 0.4931,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.21312670409679413,
      "rewards/margins": 2.5830297470092773,
      "rewards/rejected": -2.796156406402588,
      "step": 450
    },
    {
      "epoch": 0.81,
      "grad_norm": 126.77510481617468,
      "learning_rate": 5.46103307238617e-08,
      "logits/chosen": 123.6783447265625,
      "logits/rejected": 120.97247314453125,
      "logps/chosen": -415.7288513183594,
      "logps/rejected": -401.6763610839844,
      "loss": 0.5306,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.10093896090984344,
      "rewards/margins": 1.8561251163482666,
      "rewards/rejected": -1.7551860809326172,
      "step": 460
    },
    {
      "epoch": 0.82,
      "grad_norm": 150.71507301379341,
      "learning_rate": 4.5431763875625226e-08,
      "logits/chosen": 116.371826171875,
      "logits/rejected": 127.0480728149414,
      "logps/chosen": -382.4268798828125,
      "logps/rejected": -425.90899658203125,
      "loss": 0.5385,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": 0.36557143926620483,
      "rewards/margins": 2.3377490043640137,
      "rewards/rejected": -1.972177267074585,
      "step": 470
    },
    {
      "epoch": 0.84,
      "grad_norm": 158.81620899731348,
      "learning_rate": 3.702014779041826e-08,
      "logits/chosen": 123.89225769042969,
      "logits/rejected": 123.4906234741211,
      "logps/chosen": -424.238525390625,
      "logps/rejected": -413.3152770996094,
      "loss": 0.5397,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": 0.1715961992740631,
      "rewards/margins": 2.171330213546753,
      "rewards/rejected": -1.9997339248657227,
      "step": 480
    },
    {
      "epoch": 0.86,
      "grad_norm": 146.54123732916176,
      "learning_rate": 2.940701862212802e-08,
      "logits/chosen": 126.18601989746094,
      "logits/rejected": 121.62428283691406,
      "logps/chosen": -400.54058837890625,
      "logps/rejected": -408.0806884765625,
      "loss": 0.5078,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.3071042597293854,
      "rewards/margins": 1.9608478546142578,
      "rewards/rejected": -2.2679522037506104,
      "step": 490
    },
    {
      "epoch": 0.88,
      "grad_norm": 179.72110486843388,
      "learning_rate": 2.2620918901771507e-08,
      "logits/chosen": 123.40205383300781,
      "logits/rejected": 128.0767059326172,
      "logps/chosen": -391.4453125,
      "logps/rejected": -411.9076232910156,
      "loss": 0.5432,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.5281409621238708,
      "rewards/margins": 1.8229057788848877,
      "rewards/rejected": -2.3510470390319824,
      "step": 500
    },
    {
      "epoch": 0.88,
      "eval_logits/chosen": 82.89906311035156,
      "eval_logits/rejected": 83.63628387451172,
      "eval_logps/chosen": -392.10986328125,
      "eval_logps/rejected": -413.8585510253906,
      "eval_loss": 0.4134235084056854,
      "eval_rewards/accuracies": 0.8031914830207825,
      "eval_rewards/chosen": -0.3763478994369507,
      "eval_rewards/margins": 3.1296117305755615,
      "eval_rewards/rejected": -3.5059597492218018,
      "eval_runtime": 139.1588,
      "eval_samples_per_second": 21.558,
      "eval_steps_per_second": 0.675,
      "step": 500
    },
    {
      "epoch": 0.89,
      "grad_norm": 136.393779166859,
      "learning_rate": 1.6687290528135722e-08,
      "logits/chosen": 125.94908142089844,
      "logits/rejected": 125.49617004394531,
      "logps/chosen": -416.5887756347656,
      "logps/rejected": -427.69500732421875,
      "loss": 0.5234,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.5508764386177063,
      "rewards/margins": 2.112180471420288,
      "rewards/rejected": -2.6630568504333496,
      "step": 510
    },
    {
      "epoch": 0.91,
      "grad_norm": 146.29141931806384,
      "learning_rate": 1.1628379383059022e-08,
      "logits/chosen": 120.80562591552734,
      "logits/rejected": 115.93048095703125,
      "logps/chosen": -371.3426513671875,
      "logps/rejected": -387.1521301269531,
      "loss": 0.4969,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.32862672209739685,
      "rewards/margins": 2.9703025817871094,
      "rewards/rejected": -3.298929214477539,
      "step": 520
    },
    {
      "epoch": 0.93,
      "grad_norm": 147.11717811510456,
      "learning_rate": 7.463151928961548e-09,
      "logits/chosen": 122.363037109375,
      "logits/rejected": 109.66593933105469,
      "logps/chosen": -448.3584899902344,
      "logps/rejected": -436.228515625,
      "loss": 0.5469,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.46789926290512085,
      "rewards/margins": 2.4167771339416504,
      "rewards/rejected": -2.884676933288574,
      "step": 530
    },
    {
      "epoch": 0.95,
      "grad_norm": 155.0350657623424,
      "learning_rate": 4.207224101311246e-09,
      "logits/chosen": 117.22102355957031,
      "logits/rejected": 120.94425201416016,
      "logps/chosen": -442.1240234375,
      "logps/rejected": -422.91839599609375,
      "loss": 0.5662,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.7507426738739014,
      "rewards/margins": 2.2546093463897705,
      "rewards/rejected": -3.005352020263672,
      "step": 540
    },
    {
      "epoch": 0.96,
      "grad_norm": 152.83811169959978,
      "learning_rate": 1.8728027626156997e-09,
      "logits/chosen": 125.8171157836914,
      "logits/rejected": 120.76863861083984,
      "logps/chosen": -421.21453857421875,
      "logps/rejected": -413.3653869628906,
      "loss": 0.5051,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.43215274810791016,
      "rewards/margins": 2.5282857418060303,
      "rewards/rejected": -2.9604387283325195,
      "step": 550
    },
    {
      "epoch": 0.98,
      "grad_norm": 119.16575693204335,
      "learning_rate": 4.686399374358441e-10,
      "logits/chosen": 125.23756408691406,
      "logits/rejected": 120.61265563964844,
      "logps/chosen": -421.6220703125,
      "logps/rejected": -447.9476013183594,
      "loss": 0.4916,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.8027129173278809,
      "rewards/margins": 2.2439770698547363,
      "rewards/rejected": -3.046689987182617,
      "step": 560
    },
    {
      "epoch": 1.0,
      "grad_norm": 150.61389898443417,
      "learning_rate": 0.0,
      "logits/chosen": 126.7450942993164,
      "logits/rejected": 124.49223327636719,
      "logps/chosen": -381.63909912109375,
      "logps/rejected": -396.2301940917969,
      "loss": 0.4902,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.5853160619735718,
      "rewards/margins": 2.132859706878662,
      "rewards/rejected": -2.7181754112243652,
      "step": 570
    },
    {
      "epoch": 1.0,
      "step": 570,
      "total_flos": 0.0,
      "train_loss": 0.5623811487565961,
      "train_runtime": 6612.2068,
      "train_samples_per_second": 11.039,
      "train_steps_per_second": 0.086
    }
  ],
  "logging_steps": 10,
  "max_steps": 570,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}