hugodk-sch committed
Commit
1c1db2d
1 Parent(s): 6525991

Model save

Files changed (5)
  1. README.md +13 -16
  2. adapter_model.safetensors +1 -1
  3. all_results.json +3 -16
  4. train_results.json +3 -3
  5. trainer_state.json +376 -376
README.md CHANGED
@@ -1,13 +1,10 @@
 ---
 library_name: peft
 tags:
- - alignment-handbook
 - trl
 - dpo
 - generated_from_trainer
 base_model: NbAiLab/nb-gpt-j-6B-v2
- datasets:
- - hugodk-sch/aftonposten_title_prefs
 model-index:
 - name: aftonposten-6b-align-scan
   results: []
@@ -18,17 +15,17 @@ should probably proofread and complete it, then remove this comment. -->
 
 # aftonposten-6b-align-scan
 
- This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
+ This model is a fine-tuned version of [NbAiLab/nb-gpt-j-6B-v2](https://huggingface.co/NbAiLab/nb-gpt-j-6B-v2) on an unknown dataset.
 It achieves the following results on the evaluation set:
- - Loss: 0.5001
- - Rewards/chosen: 0.0029
- - Rewards/rejected: 0.0032
- - Rewards/accuracies: 0.4842
- - Rewards/margins: -0.0003
- - Logps/rejected: -37.5006
- - Logps/chosen: -34.0202
- - Logits/rejected: -2.2393
- - Logits/chosen: -2.2442
+ - Loss: 0.4998
+ - Rewards/chosen: 0.0042
+ - Rewards/rejected: 0.0036
+ - Rewards/accuracies: 0.5137
+ - Rewards/margins: 0.0006
+ - Logps/rejected: -37.5046
+ - Logps/chosen: -34.0207
+ - Logits/rejected: -2.2382
+ - Logits/chosen: -2.2431
 
 ## Model description
 
@@ -63,9 +60,9 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
 |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
- | 0.5 | 0.26 | 100 | 0.5003 | 0.0013 | 0.0021 | 0.4934 | -0.0008 | -37.5063 | -34.0280 | -2.2391 | -2.2440 |
- | 0.4994 | 0.52 | 200 | 0.5005 | -0.0009 | 0.0012 | 0.5021 | -0.0021 | -37.5106 | -34.0392 | -2.2401 | -2.2450 |
- | 0.499 | 0.78 | 300 | 0.5010 | -0.0014 | 0.0035 | 0.4431 | -0.0049 | -37.4991 | -34.0418 | -2.2397 | -2.2446 |
+ | 0.4987 | 0.26 | 100 | 0.4994 | 0.0049 | 0.0020 | 0.5075 | 0.0029 | -37.5100 | -34.0182 | -2.2389 | -2.2437 |
+ | 0.4979 | 0.52 | 200 | 0.4998 | 0.0039 | 0.0031 | 0.4863 | 0.0008 | -37.5062 | -34.0214 | -2.2384 | -2.2432 |
+ | 0.4978 | 0.78 | 300 | 0.4998 | 0.0042 | 0.0036 | 0.5137 | 0.0006 | -37.5046 | -34.0207 | -2.2382 | -2.2431 |
 
 
 ### Framework versions
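The updated card describes a PEFT adapter trained with DPO on top of NbAiLab/nb-gpt-j-6B-v2. A minimal loading sketch; the adapter repo id hugodk-sch/aftonposten-6b-align-scan is an assumption (model-index name plus this account), not something stated in the diff:

```python
# Sketch only: the adapter repo id is assumed, not confirmed by the diff.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "NbAiLab/nb-gpt-j-6B-v2"                   # base_model from the card
adapter_id = "hugodk-sch/aftonposten-6b-align-scan"  # assumed adapter repo

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.float16)
model = PeftModel.from_pretrained(base, adapter_id)  # loads adapter_model.safetensors

inputs = tokenizer("Skriv en kort nyhetstittel:", return_tensors="pt")
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```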
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:0e0fe08296538d49776839e2fd3ec2743dd0adef4a1968a65578a138c0d36353
+ oid sha256:0143633a911bfdb88e846b1d054b1bda220855de7ed3cbfb07035b381388a398
 size 176183216
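Only the LFS object id changes here; the size stays 176183216 bytes, so the adapter weights were overwritten by a same-sized checkpoint. A small sketch for checking a locally pulled file against the new pointer (the local path is an assumption; the git-lfs oid is the SHA-256 of the file contents):

```python
# Sketch: verify a local adapter_model.safetensors against the LFS pointer above.
import hashlib, os

path = "adapter_model.safetensors"  # assumed local path after `git lfs pull`
expected_oid = "0143633a911bfdb88e846b1d054b1bda220855de7ed3cbfb07035b381388a398"
expected_size = 176183216

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

print("size ok:", os.path.getsize(path) == expected_size)
print("oid ok:", h.hexdigest() == expected_oid)
```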
all_results.json CHANGED
@@ -1,21 +1,8 @@
 {
 "epoch": 1.0,
- "eval_logits/chosen": -2.244190216064453,
- "eval_logits/rejected": -2.2393178939819336,
- "eval_logps/chosen": -34.020179748535156,
- "eval_logps/rejected": -37.50062561035156,
- "eval_loss": 0.500054657459259,
- "eval_rewards/accuracies": 0.4842192530632019,
- "eval_rewards/chosen": 0.002875381149351597,
- "eval_rewards/margins": -0.0003236339834984392,
- "eval_rewards/rejected": 0.0031990152783691883,
- "eval_runtime": 145.749,
- "eval_samples": 343,
- "eval_samples_per_second": 2.353,
- "eval_steps_per_second": 0.295,
- "train_loss": 0.4997496530607149,
- "train_runtime": 3257.6696,
+ "train_loss": 0.49920677581390777,
+ "train_runtime": 3256.0166,
 "train_samples": 3079,
- "train_samples_per_second": 0.945,
+ "train_samples_per_second": 0.946,
 "train_steps_per_second": 0.118
 }
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
 "epoch": 1.0,
- "train_loss": 0.4997496530607149,
- "train_runtime": 3257.6696,
+ "train_loss": 0.49920677581390777,
+ "train_runtime": 3256.0166,
 "train_samples": 3079,
- "train_samples_per_second": 0.945,
+ "train_samples_per_second": 0.946,
 "train_steps_per_second": 0.118
 }
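The throughput figures follow directly from the counts in these files: 3079 samples and 385 optimizer steps (the final step in trainer_state.json below) over a roughly 3256-second run round to the logged 0.946 samples/s and 0.118 steps/s. A quick check:

```python
# Quick arithmetic check of the train_results.json throughput values.
train_samples = 3079
train_runtime = 3256.0166  # seconds
total_steps = 385          # final "step" in trainer_state.json

print(round(train_samples / train_runtime, 3))  # 0.946 -> train_samples_per_second
print(round(total_steps / train_runtime, 3))    # 0.118 -> train_steps_per_second
```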
trainer_state.json CHANGED
@@ -25,590 +25,590 @@
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.2820512820512818e-07,
28
- "logits/chosen": -1.866280436515808,
29
- "logits/rejected": -1.8705918788909912,
30
- "logps/chosen": -37.00367736816406,
31
- "logps/rejected": -33.67123794555664,
32
- "loss": 0.4986,
33
- "rewards/accuracies": 0.4722222089767456,
34
- "rewards/chosen": 0.000584296474698931,
35
- "rewards/margins": 0.008817334659397602,
36
- "rewards/rejected": -0.00823303870856762,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.5641025641025636e-07,
42
- "logits/chosen": -1.9974257946014404,
43
- "logits/rejected": -2.0000691413879395,
44
- "logps/chosen": -29.649478912353516,
45
- "logps/rejected": -29.038330078125,
46
- "loss": 0.5011,
47
- "rewards/accuracies": 0.3499999940395355,
48
- "rewards/chosen": -0.0014564015436917543,
49
- "rewards/margins": -0.0064794206991791725,
50
- "rewards/rejected": 0.0050230189226567745,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.8461538461538463e-07,
56
- "logits/chosen": -1.9199994802474976,
57
- "logits/rejected": -1.91730535030365,
58
- "logps/chosen": -31.410457611083984,
59
- "logps/rejected": -33.23019027709961,
60
- "loss": 0.4992,
61
- "rewards/accuracies": 0.5249999761581421,
62
- "rewards/chosen": 0.0011272300034761429,
63
- "rewards/margins": 0.0034732469357550144,
64
- "rewards/rejected": -0.0023460157681256533,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438433e-07,
70
- "logits/chosen": -2.017059326171875,
71
- "logits/rejected": -2.008305788040161,
72
- "logps/chosen": -32.582275390625,
73
- "logps/rejected": -32.48918914794922,
74
- "loss": 0.5013,
75
- "rewards/accuracies": 0.4749999940395355,
76
- "rewards/chosen": -0.0010934959864243865,
77
- "rewards/margins": -0.005789449438452721,
78
- "rewards/rejected": 0.004695953335613012,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542186e-07,
84
- "logits/chosen": -1.8646684885025024,
85
- "logits/rejected": -1.853882074356079,
86
- "logps/chosen": -33.567169189453125,
87
- "logps/rejected": -35.43851852416992,
88
- "loss": 0.5009,
89
- "rewards/accuracies": 0.44999998807907104,
90
- "rewards/chosen": -0.0019382357131689787,
91
- "rewards/margins": -0.00470335315912962,
92
- "rewards/rejected": 0.0027651176787912846,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941118e-07,
98
- "logits/chosen": -1.9455400705337524,
99
- "logits/rejected": -1.947479486465454,
100
- "logps/chosen": -32.57770919799805,
101
- "logps/rejected": -33.181541442871094,
102
- "loss": 0.4989,
103
- "rewards/accuracies": 0.5625,
104
- "rewards/chosen": 0.004494256805628538,
105
- "rewards/margins": 0.006526687648147345,
106
- "rewards/rejected": -0.00203243107534945,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413548e-07,
112
- "logits/chosen": -2.079427719116211,
113
- "logits/rejected": -2.084414482116699,
114
- "logps/chosen": -33.99236297607422,
115
- "logps/rejected": -36.600887298583984,
116
- "loss": 0.4989,
117
- "rewards/accuracies": 0.612500011920929,
118
- "rewards/chosen": -0.0007750942604616284,
119
- "rewards/margins": 0.005627653561532497,
120
- "rewards/rejected": -0.006402746774256229,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-07,
126
- "logits/chosen": -1.9420673847198486,
127
- "logits/rejected": -1.9452102184295654,
128
- "logps/chosen": -34.41423416137695,
129
- "logps/rejected": -34.5662727355957,
130
- "loss": 0.5004,
131
- "rewards/accuracies": 0.44999998807907104,
132
- "rewards/chosen": 0.0027157063595950603,
133
- "rewards/margins": -0.0025114950258284807,
134
- "rewards/rejected": 0.00522720068693161,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.736716601303429e-07,
140
- "logits/chosen": -1.951348066329956,
141
- "logits/rejected": -1.9558594226837158,
142
- "logps/chosen": -32.46399688720703,
143
- "logps/rejected": -32.344329833984375,
144
- "loss": 0.5004,
145
- "rewards/accuracies": 0.4749999940395355,
146
- "rewards/chosen": 0.0006849506171420217,
147
- "rewards/margins": -0.0022309008054435253,
148
- "rewards/rejected": 0.002915852004662156,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.62624545834521e-07,
154
- "logits/chosen": -2.0500051975250244,
155
- "logits/rejected": -2.0480096340179443,
156
- "logps/chosen": -32.25367736816406,
157
- "logps/rejected": -31.2783260345459,
158
- "loss": 0.5,
159
- "rewards/accuracies": 0.48750001192092896,
160
- "rewards/chosen": -0.002567791845649481,
161
- "rewards/margins": -0.00014422755339182913,
162
- "rewards/rejected": -0.0024235642049461603,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
- "eval_logits/chosen": -2.244023323059082,
168
- "eval_logits/rejected": -2.2391417026519775,
169
- "eval_logps/chosen": -34.02798080444336,
170
- "eval_logps/rejected": -37.506290435791016,
171
- "eval_loss": 0.500300407409668,
172
- "eval_rewards/accuracies": 0.49335551261901855,
173
- "eval_rewards/chosen": 0.0013138726353645325,
174
- "eval_rewards/margins": -0.0007514380267821252,
175
- "eval_rewards/rejected": 0.0020653100218623877,
176
- "eval_runtime": 146.2282,
177
- "eval_samples_per_second": 2.346,
178
  "eval_steps_per_second": 0.294,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.4982572012636904e-07,
184
- "logits/chosen": -2.0061328411102295,
185
- "logits/rejected": -2.0037178993225098,
186
- "logps/chosen": -33.24314880371094,
187
- "logps/rejected": -34.023292541503906,
188
- "loss": 0.5003,
189
- "rewards/accuracies": 0.4375,
190
- "rewards/chosen": 0.0001343002513749525,
191
- "rewards/margins": -0.001081160269677639,
192
- "rewards/rejected": 0.0012154604773968458,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777677e-07,
198
- "logits/chosen": -2.0173094272613525,
199
- "logits/rejected": -2.008920431137085,
200
- "logps/chosen": -32.45183563232422,
201
- "logps/rejected": -32.17412185668945,
202
- "loss": 0.5002,
203
- "rewards/accuracies": 0.48750001192092896,
204
- "rewards/chosen": -0.0013863157946616411,
205
- "rewards/margins": 0.0011368464911356568,
206
- "rewards/rejected": -0.002523162867873907,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.194082707715275e-07,
212
- "logits/chosen": -2.0478129386901855,
213
- "logits/rejected": -2.0397789478302,
214
- "logps/chosen": -30.514944076538086,
215
- "logps/rejected": -32.053070068359375,
216
- "loss": 0.5012,
217
- "rewards/accuracies": 0.5,
218
- "rewards/chosen": -0.007845849730074406,
219
- "rewards/margins": -0.007833347655832767,
220
- "rewards/rejected": -1.2502726349339355e-05,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.020402418666621e-07,
226
- "logits/chosen": -1.978299856185913,
227
- "logits/rejected": -1.9885809421539307,
228
- "logps/chosen": -31.413021087646484,
229
- "logps/rejected": -32.54629135131836,
230
- "loss": 0.4995,
231
- "rewards/accuracies": 0.48750001192092896,
232
- "rewards/chosen": 0.0013283130247145891,
233
- "rewards/margins": 0.0036107772029936314,
234
- "rewards/rejected": -0.002282463712617755,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.8341962650351185e-07,
240
- "logits/chosen": -1.8922052383422852,
241
- "logits/rejected": -1.8932926654815674,
242
- "logps/chosen": -34.210052490234375,
243
- "logps/rejected": -34.78533935546875,
244
- "loss": 0.4998,
245
- "rewards/accuracies": 0.48750001192092896,
246
- "rewards/chosen": -0.003472150769084692,
247
- "rewards/margins": 0.0021447453182190657,
248
- "rewards/rejected": -0.005616897251456976,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800572e-07,
254
- "logits/chosen": -1.9435640573501587,
255
- "logits/rejected": -1.9400733709335327,
256
- "logps/chosen": -36.18402862548828,
257
- "logps/rejected": -32.75020217895508,
258
- "loss": 0.4998,
259
- "rewards/accuracies": 0.48750001192092896,
260
- "rewards/chosen": -0.0024705410469323397,
261
- "rewards/margins": 0.0030191238038241863,
262
- "rewards/rejected": -0.00548966508358717,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.430433172111807e-07,
268
- "logits/chosen": -2.043527603149414,
269
- "logits/rejected": -2.0361220836639404,
270
- "logps/chosen": -33.81038284301758,
271
- "logps/rejected": -31.369457244873047,
272
- "loss": 0.4997,
273
- "rewards/accuracies": 0.550000011920929,
274
- "rewards/chosen": -0.002503907773643732,
275
- "rewards/margins": 0.0010178396478295326,
276
- "rewards/rejected": -0.0035217474214732647,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.216202642830543e-07,
282
- "logits/chosen": -2.0494205951690674,
283
- "logits/rejected": -2.0546982288360596,
284
- "logps/chosen": -32.51402282714844,
285
- "logps/rejected": -32.5015983581543,
286
- "loss": 0.4982,
287
- "rewards/accuracies": 0.625,
288
- "rewards/chosen": 0.0061522433534264565,
289
- "rewards/margins": 0.009443378075957298,
290
- "rewards/rejected": -0.003291133791208267,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.9960716642946403e-07,
296
- "logits/chosen": -2.0493359565734863,
297
- "logits/rejected": -2.046536922454834,
298
- "logps/chosen": -31.47686195373535,
299
- "logps/rejected": -31.326452255249023,
300
- "loss": 0.4991,
301
- "rewards/accuracies": 0.5874999761581421,
302
- "rewards/chosen": 0.002418341813609004,
303
- "rewards/margins": 0.004066127352416515,
304
- "rewards/rejected": -0.0016477858880534768,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.771853789806683e-07,
310
- "logits/chosen": -1.9191436767578125,
311
- "logits/rejected": -1.9238201379776,
312
- "logps/chosen": -31.60672378540039,
313
- "logps/rejected": -32.80228805541992,
314
- "loss": 0.4994,
315
- "rewards/accuracies": 0.5249999761581421,
316
- "rewards/chosen": -8.77844140632078e-05,
317
- "rewards/margins": 0.002183270873501897,
318
- "rewards/rejected": -0.0022710547782480717,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
- "eval_logits/chosen": -2.2449820041656494,
324
- "eval_logits/rejected": -2.2400975227355957,
325
- "eval_logps/chosen": -34.03920364379883,
326
- "eval_logps/rejected": -37.51057434082031,
327
- "eval_loss": 0.5004644989967346,
328
- "eval_rewards/accuracies": 0.5020764470100403,
329
- "eval_rewards/chosen": -0.0009311072644777596,
330
- "eval_rewards/margins": -0.002139872871339321,
331
- "eval_rewards/rejected": 0.0012087655486539006,
332
- "eval_runtime": 146.0512,
333
- "eval_samples_per_second": 2.348,
334
- "eval_steps_per_second": 0.294,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402e-07,
340
- "logits/chosen": -2.03255033493042,
341
- "logits/rejected": -2.0432262420654297,
342
- "logps/chosen": -31.951080322265625,
343
- "logps/rejected": -33.896484375,
344
- "loss": 0.498,
345
- "rewards/accuracies": 0.5874999761581421,
346
- "rewards/chosen": 0.004044383764266968,
347
- "rewards/margins": 0.010801524855196476,
348
- "rewards/rejected": -0.006757141090929508,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.318564697655179e-07,
354
- "logits/chosen": -1.9258638620376587,
355
- "logits/rejected": -1.9407155513763428,
356
- "logps/chosen": -30.115543365478516,
357
- "logps/rejected": -31.575191497802734,
358
- "loss": 0.4992,
359
- "rewards/accuracies": 0.6000000238418579,
360
- "rewards/chosen": -0.0008008191362023354,
361
- "rewards/margins": 0.002602284774184227,
362
- "rewards/rejected": -0.003403103444725275,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.093227910899832e-07,
368
- "logits/chosen": -1.983538269996643,
369
- "logits/rejected": -1.9875080585479736,
370
- "logps/chosen": -33.397071838378906,
371
- "logps/rejected": -31.56662940979004,
372
- "loss": 0.4985,
373
  "rewards/accuracies": 0.550000011920929,
374
- "rewards/chosen": 0.004079356789588928,
375
- "rewards/margins": 0.007544734515249729,
376
- "rewards/rejected": -0.0034653779584914446,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279356e-07,
382
- "logits/chosen": -1.9831326007843018,
383
- "logits/rejected": -1.9611790180206299,
384
- "logps/chosen": -34.164363861083984,
385
- "logps/rejected": -34.951324462890625,
386
- "loss": 0.5004,
387
- "rewards/accuracies": 0.4749999940395355,
388
- "rewards/chosen": -0.0021236296743154526,
389
- "rewards/margins": -0.0020176086109131575,
390
- "rewards/rejected": -0.00010602096881484613,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.654436768970182e-07,
396
- "logits/chosen": -2.0248496532440186,
397
- "logits/rejected": -2.021538734436035,
398
- "logps/chosen": -32.92078399658203,
399
- "logps/rejected": -36.21001434326172,
400
- "loss": 0.5005,
401
- "rewards/accuracies": 0.4625000059604645,
402
- "rewards/chosen": 0.0008573724189773202,
403
- "rewards/margins": -0.002124571241438389,
404
- "rewards/rejected": 0.002981943776831031,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.444597403062196e-07,
410
- "logits/chosen": -1.8912862539291382,
411
- "logits/rejected": -1.8888485431671143,
412
- "logps/chosen": -34.20059585571289,
413
- "logps/rejected": -35.51679992675781,
414
- "loss": 0.5001,
415
- "rewards/accuracies": 0.48750001192092896,
416
- "rewards/chosen": -0.0024391734041273594,
417
- "rewards/margins": -3.7313438951969147e-06,
418
- "rewards/rejected": -0.002435441594570875,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.2434529917578887e-07,
424
- "logits/chosen": -1.876117467880249,
425
- "logits/rejected": -1.8735599517822266,
426
- "logps/chosen": -34.39899444580078,
427
- "logps/rejected": -31.73566246032715,
428
- "loss": 0.5014,
429
- "rewards/accuracies": 0.4000000059604645,
430
- "rewards/chosen": -0.00027935029356740415,
431
- "rewards/margins": -0.007413160987198353,
432
- "rewards/rejected": 0.007133810315281153,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603521e-07,
438
- "logits/chosen": -1.9805666208267212,
439
- "logits/rejected": -1.9699329137802124,
440
- "logps/chosen": -35.328285217285156,
441
- "logps/rejected": -31.84103012084961,
442
- "loss": 0.4988,
443
- "rewards/accuracies": 0.44999998807907104,
444
- "rewards/chosen": 0.005670648999512196,
445
- "rewards/margins": 0.0056780558079481125,
446
- "rewards/rejected": -7.406389158859383e-06,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071453e-08,
452
- "logits/chosen": -2.0757369995117188,
453
- "logits/rejected": -2.06070613861084,
454
- "logps/chosen": -30.9174861907959,
455
- "logps/rejected": -32.63935470581055,
456
- "loss": 0.5004,
457
- "rewards/accuracies": 0.44999998807907104,
458
- "rewards/chosen": 0.001327360630966723,
459
- "rewards/margins": -0.0011682776967063546,
460
- "rewards/rejected": 0.0024956378620117903,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-08,
466
- "logits/chosen": -1.947251319885254,
467
- "logits/rejected": -1.9447133541107178,
468
- "logps/chosen": -32.908634185791016,
469
- "logps/rejected": -30.82659912109375,
470
- "loss": 0.499,
471
- "rewards/accuracies": 0.512499988079071,
472
- "rewards/chosen": 0.0036955091636627913,
473
- "rewards/margins": 0.005653515458106995,
474
- "rewards/rejected": -0.001958005130290985,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
- "eval_logits/chosen": -2.2445528507232666,
480
- "eval_logits/rejected": -2.23966383934021,
481
- "eval_logps/chosen": -34.04175567626953,
482
- "eval_logps/rejected": -37.499141693115234,
483
- "eval_loss": 0.5009724497795105,
484
- "eval_rewards/accuracies": 0.4431063234806061,
485
- "eval_rewards/chosen": -0.0014407250564545393,
486
- "eval_rewards/margins": -0.004937068559229374,
487
- "eval_rewards/rejected": 0.003496343968436122,
488
- "eval_runtime": 145.9859,
489
- "eval_samples_per_second": 2.35,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589034e-08,
496
- "logits/chosen": -1.9292316436767578,
497
- "logits/rejected": -1.9259681701660156,
498
- "logps/chosen": -31.583276748657227,
499
- "logps/rejected": -33.727840423583984,
500
  "loss": 0.4993,
501
- "rewards/accuracies": 0.512499988079071,
502
- "rewards/chosen": 0.004015020560473204,
503
- "rewards/margins": 0.001152882701717317,
504
- "rewards/rejected": 0.002862137509509921,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380912e-08,
510
- "logits/chosen": -1.9808372259140015,
511
- "logits/rejected": -1.9685356616973877,
512
- "logps/chosen": -34.57278823852539,
513
- "logps/rejected": -33.57910919189453,
514
- "loss": 0.498,
515
- "rewards/accuracies": 0.5874999761581421,
516
- "rewards/chosen": 0.004657471086829901,
517
- "rewards/margins": 0.01271037757396698,
518
- "rewards/rejected": -0.008052906021475792,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-08,
524
- "logits/chosen": -2.0161705017089844,
525
- "logits/rejected": -2.0147035121917725,
526
- "logps/chosen": -33.47340393066406,
527
- "logps/rejected": -32.46953582763672,
528
- "loss": 0.4998,
529
- "rewards/accuracies": 0.48750001192092896,
530
- "rewards/chosen": 0.0024778605438768864,
531
- "rewards/margins": 0.00046821607975289226,
532
- "rewards/rejected": 0.0020096441730856895,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.05793773749158e-08,
538
- "logits/chosen": -2.1033377647399902,
539
- "logits/rejected": -2.0875496864318848,
540
- "logps/chosen": -34.152557373046875,
541
- "logps/rejected": -33.08795166015625,
542
- "loss": 0.5003,
543
- "rewards/accuracies": 0.5,
544
- "rewards/chosen": 0.005367305129766464,
545
- "rewards/margins": -0.0006489218212664127,
546
- "rewards/rejected": 0.006016227416694164,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.251801807404168e-08,
552
- "logits/chosen": -1.9754228591918945,
553
- "logits/rejected": -1.974477767944336,
554
- "logps/chosen": -33.24272537231445,
555
- "logps/rejected": -32.46410369873047,
556
  "loss": 0.4986,
557
- "rewards/accuracies": 0.5375000238418579,
558
- "rewards/chosen": 0.007874632254242897,
559
- "rewards/margins": 0.007666703313589096,
560
- "rewards/rejected": 0.00020792819850612432,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-09,
566
- "logits/chosen": -1.9320882558822632,
567
- "logits/rejected": -1.9424550533294678,
568
- "logps/chosen": -32.212371826171875,
569
- "logps/rejected": -35.286354064941406,
570
- "loss": 0.5003,
571
- "rewards/accuracies": 0.4375,
572
- "rewards/chosen": -0.0015360517427325249,
573
- "rewards/margins": -0.002017115242779255,
574
- "rewards/rejected": 0.00048106274334713817,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050324e-09,
580
- "logits/chosen": -2.070864200592041,
581
- "logits/rejected": -2.0643177032470703,
582
- "logps/chosen": -33.652870178222656,
583
- "logps/rejected": -29.220972061157227,
584
- "loss": 0.5004,
585
- "rewards/accuracies": 0.4625000059604645,
586
- "rewards/chosen": -0.00018037435074802488,
587
- "rewards/margins": -0.001271072425879538,
588
- "rewards/rejected": 0.001090698060579598,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-10,
594
- "logits/chosen": -1.9306039810180664,
595
- "logits/rejected": -1.932790756225586,
596
- "logps/chosen": -34.26443862915039,
597
- "logps/rejected": -30.895788192749023,
598
- "loss": 0.5001,
599
- "rewards/accuracies": 0.44999998807907104,
600
- "rewards/chosen": -0.005078143440186977,
601
- "rewards/margins": -0.00044635325320996344,
602
- "rewards/rejected": -0.004631790332496166,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
- "train_loss": 0.4997496530607149,
610
- "train_runtime": 3257.6696,
611
- "train_samples_per_second": 0.945,
612
  "train_steps_per_second": 0.118
613
  }
614
  ],
 
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.2820512820512818e-07,
28
+ "logits/chosen": -1.8662705421447754,
29
+ "logits/rejected": -1.870587706565857,
30
+ "logps/chosen": -36.9873046875,
31
+ "logps/rejected": -33.66048049926758,
32
+ "loss": 0.4974,
33
+ "rewards/accuracies": 0.5138888955116272,
34
+ "rewards/chosen": 0.005787754897028208,
35
+ "rewards/margins": 0.014910436235368252,
36
+ "rewards/rejected": -0.00912268366664648,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.5641025641025636e-07,
42
+ "logits/chosen": -1.997283935546875,
43
+ "logits/rejected": -1.9999357461929321,
44
+ "logps/chosen": -29.62722396850586,
45
+ "logps/rejected": -29.066070556640625,
46
+ "loss": 0.4988,
47
+ "rewards/accuracies": 0.5625,
48
+ "rewards/chosen": 0.004491320811212063,
49
+ "rewards/margins": 0.005279188044369221,
50
+ "rewards/rejected": -0.0007878671167418361,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.8461538461538463e-07,
56
+ "logits/chosen": -1.9196703433990479,
57
+ "logits/rejected": -1.9169772863388062,
58
+ "logps/chosen": -31.424610137939453,
59
+ "logps/rejected": -33.22700119018555,
60
+ "loss": 0.5001,
61
+ "rewards/accuracies": 0.512499988079071,
62
+ "rewards/chosen": -0.0025542874354869127,
63
+ "rewards/margins": 8.203647666960023e-06,
64
+ "rewards/rejected": -0.002562491921707988,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438433e-07,
70
+ "logits/chosen": -2.0166847705841064,
71
+ "logits/rejected": -2.007936954498291,
72
+ "logps/chosen": -32.587745666503906,
73
+ "logps/rejected": -32.51628875732422,
74
+ "loss": 0.5006,
75
+ "rewards/accuracies": 0.5,
76
+ "rewards/chosen": -0.0032799947075545788,
77
+ "rewards/margins": -0.002195248380303383,
78
+ "rewards/rejected": -0.0010847460944205523,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542186e-07,
84
+ "logits/chosen": -1.8643096685409546,
85
+ "logits/rejected": -1.853522539138794,
86
+ "logps/chosen": -33.55935287475586,
87
+ "logps/rejected": -35.42003631591797,
88
+ "loss": 0.5018,
89
+ "rewards/accuracies": 0.42500001192092896,
90
+ "rewards/chosen": -0.0005626566708087921,
91
+ "rewards/margins": -0.010254684835672379,
92
+ "rewards/rejected": 0.009692028164863586,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941118e-07,
98
+ "logits/chosen": -1.9451143741607666,
99
+ "logits/rejected": -1.9470453262329102,
100
+ "logps/chosen": -32.59963607788086,
101
+ "logps/rejected": -33.16991424560547,
102
+ "loss": 0.5,
103
+ "rewards/accuracies": 0.48750001192092896,
104
+ "rewards/chosen": 0.00016471892013214529,
105
+ "rewards/margins": -0.0002745899255387485,
106
+ "rewards/rejected": 0.00043930913670919836,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413548e-07,
112
+ "logits/chosen": -2.080049991607666,
113
+ "logits/rejected": -2.085019588470459,
114
+ "logps/chosen": -33.990013122558594,
115
+ "logps/rejected": -36.58393478393555,
116
+ "loss": 0.4992,
117
+ "rewards/accuracies": 0.4749999940395355,
118
+ "rewards/chosen": -0.00045763104571960866,
119
+ "rewards/margins": 0.004060628358274698,
120
+ "rewards/rejected": -0.004518259782344103,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-07,
126
+ "logits/chosen": -1.943108320236206,
127
+ "logits/rejected": -1.9462664127349854,
128
+ "logps/chosen": -34.41640090942383,
129
+ "logps/rejected": -34.587799072265625,
130
+ "loss": 0.4998,
131
+ "rewards/accuracies": 0.5375000238418579,
132
+ "rewards/chosen": 0.0034226563293486834,
133
+ "rewards/margins": 0.002040152670815587,
134
+ "rewards/rejected": 0.0013825036585330963,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.736716601303429e-07,
140
+ "logits/chosen": -1.9505999088287354,
141
+ "logits/rejected": -1.955100655555725,
142
+ "logps/chosen": -32.46385955810547,
143
+ "logps/rejected": -32.354026794433594,
144
+ "loss": 0.4998,
145
+ "rewards/accuracies": 0.48750001192092896,
146
+ "rewards/chosen": 0.0010680232662707567,
147
+ "rewards/margins": -0.00039678759640082717,
148
+ "rewards/rejected": 0.0014648116193711758,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.62624545834521e-07,
154
+ "logits/chosen": -2.0487732887268066,
155
+ "logits/rejected": -2.0467641353607178,
156
+ "logps/chosen": -32.22932052612305,
157
+ "logps/rejected": -31.277706146240234,
158
+ "loss": 0.4987,
159
+ "rewards/accuracies": 0.512499988079071,
160
+ "rewards/chosen": 0.003454477759078145,
161
+ "rewards/margins": 0.0069038658402860165,
162
+ "rewards/rejected": -0.003449387848377228,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
+ "eval_logits/chosen": -2.2437498569488525,
168
+ "eval_logits/rejected": -2.238868474960327,
169
+ "eval_logps/chosen": -34.018192291259766,
170
+ "eval_logps/rejected": -37.51003646850586,
171
+ "eval_loss": 0.49943608045578003,
172
+ "eval_rewards/accuracies": 0.5074750781059265,
173
+ "eval_rewards/chosen": 0.0049087232910096645,
174
+ "eval_rewards/margins": 0.00293393200263381,
175
+ "eval_rewards/rejected": 0.001974791055545211,
176
+ "eval_runtime": 146.1062,
177
+ "eval_samples_per_second": 2.348,
178
  "eval_steps_per_second": 0.294,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.4982572012636904e-07,
184
+ "logits/chosen": -2.005328893661499,
185
+ "logits/rejected": -2.0029098987579346,
186
+ "logps/chosen": -33.22368240356445,
187
+ "logps/rejected": -34.016380310058594,
188
+ "loss": 0.4995,
189
+ "rewards/accuracies": 0.5,
190
+ "rewards/chosen": 0.006039897445589304,
191
+ "rewards/margins": 0.0021425553131848574,
192
+ "rewards/rejected": 0.0038973423652350903,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777677e-07,
198
+ "logits/chosen": -2.0170254707336426,
199
+ "logits/rejected": -2.0086662769317627,
200
+ "logps/chosen": -32.444522857666016,
201
+ "logps/rejected": -32.190677642822266,
202
+ "loss": 0.4978,
203
+ "rewards/accuracies": 0.574999988079071,
204
+ "rewards/chosen": 0.00011485694267321378,
205
+ "rewards/margins": 0.008865321055054665,
206
+ "rewards/rejected": -0.008750463835895061,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.194082707715275e-07,
212
+ "logits/chosen": -2.0464835166931152,
213
+ "logits/rejected": -2.0384185314178467,
214
+ "logps/chosen": -30.49222183227539,
215
+ "logps/rejected": -32.05754852294922,
216
+ "loss": 0.5009,
217
+ "rewards/accuracies": 0.4375,
218
+ "rewards/chosen": -0.004951108247041702,
219
+ "rewards/margins": -0.0035899754147976637,
220
+ "rewards/rejected": -0.0013611322501674294,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.020402418666621e-07,
226
+ "logits/chosen": -1.9772565364837646,
227
+ "logits/rejected": -1.987534761428833,
228
+ "logps/chosen": -31.39870262145996,
229
+ "logps/rejected": -32.54944610595703,
230
+ "loss": 0.4984,
231
+ "rewards/accuracies": 0.5625,
232
+ "rewards/chosen": 0.0062883696518838406,
233
+ "rewards/margins": 0.01065803598612547,
234
+ "rewards/rejected": -0.004369667265564203,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.8341962650351185e-07,
240
+ "logits/chosen": -1.8916442394256592,
241
+ "logits/rejected": -1.8927291631698608,
242
+ "logps/chosen": -34.21247863769531,
243
+ "logps/rejected": -34.76598358154297,
244
+ "loss": 0.501,
245
+ "rewards/accuracies": 0.4124999940395355,
246
+ "rewards/chosen": -0.005936207715421915,
247
+ "rewards/margins": -0.0033174168784171343,
248
+ "rewards/rejected": -0.0026187908370047808,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800572e-07,
254
+ "logits/chosen": -1.9432052373886108,
255
+ "logits/rejected": -1.9397189617156982,
256
+ "logps/chosen": -36.16191864013672,
257
+ "logps/rejected": -32.74675750732422,
258
+ "loss": 0.4985,
259
+ "rewards/accuracies": 0.550000011920929,
260
+ "rewards/chosen": 0.002928206929937005,
261
+ "rewards/margins": 0.010129809379577637,
262
+ "rewards/rejected": -0.007201602216809988,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.430433172111807e-07,
268
+ "logits/chosen": -2.042198896408081,
269
+ "logits/rejected": -2.034799098968506,
270
+ "logps/chosen": -33.776123046875,
271
+ "logps/rejected": -31.36188316345215,
272
+ "loss": 0.4979,
273
+ "rewards/accuracies": 0.5249999761581421,
274
+ "rewards/chosen": 0.0065220496617257595,
275
+ "rewards/margins": 0.009532475844025612,
276
+ "rewards/rejected": -0.003010427113622427,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.216202642830543e-07,
282
+ "logits/chosen": -2.0474331378936768,
283
+ "logits/rejected": -2.0527079105377197,
284
+ "logps/chosen": -32.522254943847656,
285
+ "logps/rejected": -32.51903533935547,
286
+ "loss": 0.497,
287
+ "rewards/accuracies": 0.574999988079071,
288
+ "rewards/chosen": 0.006759388837963343,
289
+ "rewards/margins": 0.016926825046539307,
290
+ "rewards/rejected": -0.010167436674237251,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.9960716642946403e-07,
296
+ "logits/chosen": -2.0483100414276123,
297
+ "logits/rejected": -2.0455245971679688,
298
+ "logps/chosen": -31.49411964416504,
299
+ "logps/rejected": -31.317398071289062,
300
+ "loss": 0.5003,
301
+ "rewards/accuracies": 0.5,
302
+ "rewards/chosen": -0.0015499559231102467,
303
+ "rewards/margins": -0.0017946911975741386,
304
+ "rewards/rejected": 0.00024473536177538335,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.771853789806683e-07,
310
+ "logits/chosen": -1.9187097549438477,
311
+ "logits/rejected": -1.9233875274658203,
312
+ "logps/chosen": -31.572490692138672,
313
+ "logps/rejected": -32.78968811035156,
314
+ "loss": 0.4979,
315
+ "rewards/accuracies": 0.5874999761581421,
316
+ "rewards/chosen": 0.01013648696243763,
317
+ "rewards/margins": 0.009763057343661785,
318
+ "rewards/rejected": 0.0003734306083060801,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
+ "eval_logits/chosen": -2.243237257003784,
324
+ "eval_logits/rejected": -2.238358736038208,
325
+ "eval_logps/chosen": -34.021446228027344,
326
+ "eval_logps/rejected": -37.5062255859375,
327
+ "eval_loss": 0.499795526266098,
328
+ "eval_rewards/accuracies": 0.4862956702709198,
329
+ "eval_rewards/chosen": 0.003931538667529821,
330
+ "eval_rewards/margins": 0.0008117269026115537,
331
+ "eval_rewards/rejected": 0.003119812114164233,
332
+ "eval_runtime": 145.9975,
333
+ "eval_samples_per_second": 2.349,
334
+ "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402e-07,
340
+ "logits/chosen": -2.0315887928009033,
341
+ "logits/rejected": -2.042264461517334,
342
+ "logps/chosen": -31.964473724365234,
343
+ "logps/rejected": -33.852508544921875,
344
+ "loss": 0.5002,
345
+ "rewards/accuracies": 0.4375,
346
+ "rewards/chosen": 0.0020490700844675303,
347
+ "rewards/margins": -0.001008716062642634,
348
+ "rewards/rejected": 0.003057786263525486,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.318564697655179e-07,
354
+ "logits/chosen": -1.925210952758789,
355
+ "logits/rejected": -1.9400602579116821,
356
+ "logps/chosen": -30.0924129486084,
357
+ "logps/rejected": -31.5645694732666,
358
+ "loss": 0.4987,
359
+ "rewards/accuracies": 0.574999988079071,
360
+ "rewards/chosen": 0.0057382904924452305,
361
+ "rewards/margins": 0.0076560890302062035,
362
+ "rewards/rejected": -0.0019177987705916166,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.093227910899832e-07,
368
+ "logits/chosen": -1.98291015625,
369
+ "logits/rejected": -1.9868860244750977,
370
+ "logps/chosen": -33.397972106933594,
371
+ "logps/rejected": -31.54607582092285,
372
+ "loss": 0.4988,
373
  "rewards/accuracies": 0.550000011920929,
374
+ "rewards/chosen": 0.005849479231983423,
375
+ "rewards/margins": 0.004881345666944981,
376
+ "rewards/rejected": 0.0009681343799456954,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279356e-07,
382
+ "logits/chosen": -1.9833847284317017,
383
+ "logits/rejected": -1.9614006280899048,
384
+ "logps/chosen": -34.151329040527344,
385
+ "logps/rejected": -34.96162414550781,
386
+ "loss": 0.4995,
387
+ "rewards/accuracies": 0.5874999761581421,
388
+ "rewards/chosen": 0.0007256423123180866,
389
+ "rewards/margins": 0.0039748698472976685,
390
+ "rewards/rejected": -0.003249228000640869,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.654436768970182e-07,
396
+ "logits/chosen": -2.0242693424224854,
397
+ "logits/rejected": -2.0209662914276123,
398
+ "logps/chosen": -32.91448974609375,
399
+ "logps/rejected": -36.216251373291016,
400
+ "loss": 0.4995,
401
+ "rewards/accuracies": 0.512499988079071,
402
+ "rewards/chosen": 0.003173952456563711,
403
+ "rewards/margins": 0.0005722272908315063,
404
+ "rewards/rejected": 0.0026017245836555958,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.444597403062196e-07,
410
+ "logits/chosen": -1.890960931777954,
411
+ "logits/rejected": -1.8885114192962646,
412
+ "logps/chosen": -34.190608978271484,
413
+ "logps/rejected": -35.51725769042969,
414
+ "loss": 0.4997,
415
+ "rewards/accuracies": 0.5375000238418579,
416
+ "rewards/chosen": -0.0006630702991969883,
417
+ "rewards/margins": 0.0031278349924832582,
418
+ "rewards/rejected": -0.003790905699133873,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.2434529917578887e-07,
424
+ "logits/chosen": -1.875756025314331,
425
+ "logits/rejected": -1.8732185363769531,
426
+ "logps/chosen": -34.378639221191406,
427
+ "logps/rejected": -31.751827239990234,
428
+ "loss": 0.4999,
429
+ "rewards/accuracies": 0.5625,
430
+ "rewards/chosen": 0.005688496865332127,
431
+ "rewards/margins": -0.00016186293214559555,
432
+ "rewards/rejected": 0.005850359331816435,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603521e-07,
438
+ "logits/chosen": -1.9793775081634521,
439
+ "logits/rejected": -1.9687445163726807,
440
+ "logps/chosen": -35.337181091308594,
441
+ "logps/rejected": -31.856109619140625,
442
+ "loss": 0.4979,
443
+ "rewards/accuracies": 0.512499988079071,
444
+ "rewards/chosen": 0.005838591605424881,
445
+ "rewards/margins": 0.010373086668550968,
446
+ "rewards/rejected": -0.0045344955287873745,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071453e-08,
452
+ "logits/chosen": -2.0756278038024902,
453
+ "logits/rejected": -2.0606112480163574,
454
+ "logps/chosen": -30.936153411865234,
455
+ "logps/rejected": -32.660118103027344,
456
+ "loss": 0.5002,
457
+ "rewards/accuracies": 0.5,
458
+ "rewards/chosen": -0.003609871957451105,
459
+ "rewards/margins": -0.001124321250244975,
460
+ "rewards/rejected": -0.0024855495430529118,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-08,
466
+ "logits/chosen": -1.9465348720550537,
467
+ "logits/rejected": -1.9439990520477295,
468
+ "logps/chosen": -32.864601135253906,
469
+ "logps/rejected": -30.782527923583984,
470
+ "loss": 0.4978,
471
+ "rewards/accuracies": 0.5375000238418579,
472
+ "rewards/chosen": 0.01875343546271324,
473
+ "rewards/margins": 0.008469512686133385,
474
+ "rewards/rejected": 0.010283923707902431,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
+ "eval_logits/chosen": -2.243088722229004,
480
+ "eval_logits/rejected": -2.2382218837738037,
481
+ "eval_logps/chosen": -34.02065658569336,
482
+ "eval_logps/rejected": -37.50462341308594,
483
+ "eval_loss": 0.4998457729816437,
484
+ "eval_rewards/accuracies": 0.5137043595314026,
485
+ "eval_rewards/chosen": 0.004168376792222261,
486
+ "eval_rewards/margins": 0.0005690783145837486,
487
+ "eval_rewards/rejected": 0.0035992988850921392,
488
+ "eval_runtime": 145.9968,
489
+ "eval_samples_per_second": 2.349,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589034e-08,
496
+ "logits/chosen": -1.928755521774292,
497
+ "logits/rejected": -1.9254906177520752,
498
+ "logps/chosen": -31.5792236328125,
499
+ "logps/rejected": -33.729896545410156,
500
  "loss": 0.4993,
501
+ "rewards/accuracies": 0.550000011920929,
502
+ "rewards/chosen": 0.0072378418408334255,
503
+ "rewards/margins": 0.0035611852072179317,
504
+ "rewards/rejected": 0.0036766561679542065,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380912e-08,
510
+ "logits/chosen": -1.9806935787200928,
511
+ "logits/rejected": -1.968379020690918,
512
+ "logps/chosen": -34.56597900390625,
513
+ "logps/rejected": -33.5899658203125,
514
+ "loss": 0.4957,
515
+ "rewards/accuracies": 0.637499988079071,
516
+ "rewards/chosen": 0.009028470143675804,
517
+ "rewards/margins": 0.024366382509469986,
518
+ "rewards/rejected": -0.015337912365794182,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-08,
524
+ "logits/chosen": -2.0154762268066406,
525
+ "logits/rejected": -2.014007091522217,
526
+ "logps/chosen": -33.468040466308594,
527
+ "logps/rejected": -32.471534729003906,
528
+ "loss": 0.4996,
529
+ "rewards/accuracies": 0.5,
530
+ "rewards/chosen": 0.005325844045728445,
531
+ "rewards/margins": 0.0029113669879734516,
532
+ "rewards/rejected": 0.0024144775234162807,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.05793773749158e-08,
538
+ "logits/chosen": -2.102926254272461,
539
+ "logits/rejected": -2.0871331691741943,
540
+ "logps/chosen": -34.16908645629883,
541
+ "logps/rejected": -33.08391189575195,
542
+ "loss": 0.5017,
543
+ "rewards/accuracies": 0.512499988079071,
544
+ "rewards/chosen": 0.003090888261795044,
545
+ "rewards/margins": -0.007146535906940699,
546
+ "rewards/rejected": 0.010237423703074455,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.251801807404168e-08,
552
+ "logits/chosen": -1.974313735961914,
553
+ "logits/rejected": -1.9733721017837524,
554
+ "logps/chosen": -33.25641632080078,
555
+ "logps/rejected": -32.458290100097656,
556
  "loss": 0.4986,
557
+ "rewards/accuracies": 0.48750001192092896,
558
+ "rewards/chosen": 0.007705795578658581,
559
+ "rewards/margins": 0.005649130791425705,
560
+ "rewards/rejected": 0.0020566643215715885,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-09,
566
+ "logits/chosen": -1.9302194118499756,
567
+ "logits/rejected": -1.940574288368225,
568
+ "logps/chosen": -32.19397735595703,
569
+ "logps/rejected": -35.28559875488281,
570
+ "loss": 0.4996,
571
+ "rewards/accuracies": 0.5,
572
+ "rewards/chosen": 0.003214403986930847,
573
+ "rewards/margins": 0.0022677627857774496,
574
+ "rewards/rejected": 0.0009466406190767884,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050324e-09,
580
+ "logits/chosen": -2.0694031715393066,
581
+ "logits/rejected": -2.0628397464752197,
582
+ "logps/chosen": -33.639854431152344,
583
+ "logps/rejected": -29.21550941467285,
584
+ "loss": 0.4998,
585
+ "rewards/accuracies": 0.550000011920929,
586
+ "rewards/chosen": 0.0036338320933282375,
587
+ "rewards/margins": 0.00035826730891130865,
588
+ "rewards/rejected": 0.0032755639404058456,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-10,
594
+ "logits/chosen": -1.9289796352386475,
595
+ "logits/rejected": -1.9311374425888062,
596
+ "logps/chosen": -34.22175979614258,
597
+ "logps/rejected": -30.88715171813965,
598
+ "loss": 0.4984,
599
+ "rewards/accuracies": 0.5625,
600
+ "rewards/chosen": 0.005186386872082949,
601
+ "rewards/margins": 0.009542430751025677,
602
+ "rewards/rejected": -0.004356043878942728,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
+ "train_loss": 0.49920677581390777,
610
+ "train_runtime": 3256.0166,
611
+ "train_samples_per_second": 0.946,
612
  "train_steps_per_second": 0.118
613
  }
614
  ],
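The eval entries interleaved above (steps 100, 200, 300) are the rows that feed the README table. A short sketch for pulling them back out of a saved trainer_state.json (local path assumed; log_history is the standard HF Trainer field):

```python
# Sketch: list the periodic evaluation rows from trainer_state.json.
import json

with open("trainer_state.json") as f:  # assumed local path
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:           # eval rows carry eval_* keys
        print(entry["step"], entry["eval_loss"],
              entry["eval_rewards/accuracies"], entry["eval_rewards/margins"])
```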