hugodk-sch committed
Commit f2f58a7
1 Parent(s): d81592d

Model save

Files changed (5)
  1. README.md +12 -15
  2. adapter_model.safetensors +1 -1
  3. all_results.json +3 -16
  4. train_results.json +3 -3
  5. trainer_state.json +378 -378
README.md CHANGED
@@ -1,13 +1,10 @@
  ---
  library_name: peft
  tags:
- - alignment-handbook
  - trl
  - dpo
  - generated_from_trainer
  base_model: NbAiLab/nb-gpt-j-6B-v2
- datasets:
- - hugodk-sch/aftonposten_title_prefs
  model-index:
  - name: aftonposten-6b-align-scan
    results: []
@@ -18,17 +15,17 @@ should probably proofread and complete it, then remove this comment. -->

  # aftonposten-6b-align-scan

- This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
+ This model is a fine-tuned version of [NbAiLab/nb-gpt-j-6B-v2](https://huggingface.co/NbAiLab/nb-gpt-j-6B-v2) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.9997
- - Rewards/chosen: -0.0006
- - Rewards/rejected: -0.0009
+ - Loss: 0.6913
+ - Rewards/chosen: -0.0042
+ - Rewards/rejected: -0.0084
  - Rewards/accuracies: 0.5478
- - Rewards/margins: 0.0003
- - Logps/rejected: -37.6060
- - Logps/chosen: -34.0918
- - Logits/rejected: -2.2164
- - Logits/chosen: -2.2213
+ - Rewards/margins: 0.0042
+ - Logps/rejected: -37.6008
+ - Logps/chosen: -34.0770
+ - Logits/rejected: -2.2215
+ - Logits/chosen: -2.2263

  ## Model description

@@ -63,9 +60,9 @@ The following hyperparameters were used during training:

  | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
  |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
- | 0.9987 | 0.26 | 100 | 0.9998 | -0.0002 | -0.0004 | 0.5336 | 0.0002 | -37.5528 | -34.0499 | -2.2264 | -2.2313 |
- | 0.9965 | 0.52 | 200 | 0.9996 | -0.0003 | -0.0006 | 0.5071 | 0.0004 | -37.5790 | -34.0618 | -2.2204 | -2.2252 |
- | 0.9925 | 0.78 | 300 | 0.9996 | -0.0005 | -0.0009 | 0.5594 | 0.0004 | -37.6063 | -34.0836 | -2.2166 | -2.2214 |
+ | 0.6868 | 0.26 | 100 | 0.6924 | -0.0018 | -0.0036 | 0.5282 | 0.0018 | -37.5527 | -34.0527 | -2.2284 | -2.2332 |
+ | 0.6774 | 0.52 | 200 | 0.6914 | -0.0013 | -0.0052 | 0.5511 | 0.0039 | -37.5690 | -34.0476 | -2.2247 | -2.2295 |
+ | 0.6615 | 0.78 | 300 | 0.6913 | -0.0042 | -0.0084 | 0.5478 | 0.0042 | -37.6008 | -34.0770 | -2.2215 | -2.2263 |


  ### Framework versions
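
The card above describes a PEFT adapter trained with TRL's DPO trainer on top of NbAiLab/nb-gpt-j-6B-v2. As a minimal loading sketch, assuming the adapter is published at `hugodk-sch/aftonposten-6b-align-scan` (inferred from the committer and model name, not stated in this diff) and that `transformers`, `peft`, and `accelerate` are installed:

```python
# Minimal sketch: apply the adapter in this repo to the base model named in the card.
# The adapter repo id below is an assumption inferred from the committer and model name.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE_ID = "NbAiLab/nb-gpt-j-6B-v2"                   # base model from the card
ADAPTER_ID = "hugodk-sch/aftonposten-6b-align-scan"  # assumed adapter repo id

tokenizer = AutoTokenizer.from_pretrained(BASE_ID)
base = AutoModelForCausalLM.from_pretrained(BASE_ID, torch_dtype=torch.float16, device_map="auto")
model = PeftModel.from_pretrained(base, ADAPTER_ID)  # loads adapter_model.safetensors on top

prompt = "Oslo kommune varsler i dag at"
inputs = tokenizer(prompt, return_tensors="pt").to(base.device)
out = model.generate(**inputs, max_new_tokens=30, do_sample=False)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```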
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3d1b2226341b09e45acd3cfc68bd07df0a3bb41fe0bdd348f1d55167d9b2880e
+ oid sha256:1b8c4489220458976f2273f5710e8ba9db7bbf807d408284e42a2cbd2ebd4de0
  size 176183216
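
Only the Git LFS pointer changes here: the payload stays 176,183,216 bytes while the `oid` (the SHA-256 of the file contents) is new. A small sketch of checking a locally downloaded copy against the new pointer, assuming the hypothetical local path below:

```python
# Sketch: verify a locally downloaded adapter_model.safetensors against the LFS pointer.
# Git LFS stores the SHA-256 of the file contents as the pointer's oid.
import hashlib

EXPECTED_OID = "1b8c4489220458976f2273f5710e8ba9db7bbf807d408284e42a2cbd2ebd4de0"  # new pointer oid
PATH = "adapter_model.safetensors"  # hypothetical local path

digest = hashlib.sha256()
with open(PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)

print("oid matches" if digest.hexdigest() == EXPECTED_OID else "oid mismatch")
```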
all_results.json CHANGED
@@ -1,21 +1,8 @@
  {
  "epoch": 1.0,
- "eval_logits/chosen": -2.221278667449951,
- "eval_logits/rejected": -2.2164483070373535,
- "eval_logps/chosen": -34.091766357421875,
- "eval_logps/rejected": -37.60596466064453,
- "eval_loss": 0.9996774792671204,
- "eval_rewards/accuracies": 0.5477575063705444,
- "eval_rewards/chosen": -0.000572161574382335,
- "eval_rewards/margins": 0.00032129042665474117,
- "eval_rewards/rejected": -0.0008934520301409066,
- "eval_runtime": 145.6061,
- "eval_samples": 343,
- "eval_samples_per_second": 2.356,
- "eval_steps_per_second": 0.295,
- "train_loss": 0.9973225085766284,
- "train_runtime": 3253.1307,
+ "train_loss": 0.6806787961489195,
+ "train_runtime": 3251.1509,
  "train_samples": 3079,
- "train_samples_per_second": 0.946,
+ "train_samples_per_second": 0.947,
  "train_steps_per_second": 0.118
  }
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
  "epoch": 1.0,
- "train_loss": 0.9973225085766284,
- "train_runtime": 3253.1307,
+ "train_loss": 0.6806787961489195,
+ "train_runtime": 3251.1509,
  "train_samples": 3079,
- "train_samples_per_second": 0.946,
+ "train_samples_per_second": 0.947,
  "train_steps_per_second": 0.118
  }
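
The two throughput fields in `train_results.json` follow from the other values (the 385 total optimizer steps come from the final `step` entry in `trainer_state.json` below); a quick sanity check:

```python
# Sanity check of the derived throughput numbers in train_results.json.
train_samples = 3079
train_runtime = 3251.1509  # seconds
total_steps = 385          # final "step" in trainer_state.json

print(round(train_samples / train_runtime, 3))  # 0.947 == train_samples_per_second
print(round(total_steps / train_runtime, 3))    # 0.118 == train_steps_per_second
```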
trainer_state.json CHANGED
@@ -15,7 +15,7 @@
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
- "loss": 1.0,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
@@ -25,590 +25,590 @@
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
- "logits/chosen": -1.8664803504943848,
29
- "logits/rejected": -1.8707994222640991,
30
- "logps/chosen": -36.978511810302734,
31
- "logps/rejected": -33.66939163208008,
32
- "loss": 0.9993,
33
- "rewards/accuracies": 0.5694444179534912,
34
- "rewards/chosen": 0.00028087408281862736,
35
- "rewards/margins": 0.0006740752141922712,
36
- "rewards/rejected": -0.00039320107316598296,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
- "logits/chosen": -1.9984451532363892,
43
- "logits/rejected": -2.0010995864868164,
44
- "logps/chosen": -29.63176918029785,
45
- "logps/rejected": -29.05954933166504,
46
- "loss": 0.9999,
47
- "rewards/accuracies": 0.48750001192092896,
48
- "rewards/chosen": 0.00010425634536659345,
49
- "rewards/margins": 6.528960511786863e-05,
50
- "rewards/rejected": 3.8966707506915554e-05,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
- "logits/chosen": -1.9210799932479858,
57
- "logits/rejected": -1.9183847904205322,
58
- "logps/chosen": -31.414783477783203,
59
- "logps/rejected": -33.19659423828125,
60
- "loss": 1.0002,
61
- "rewards/accuracies": 0.4749999940395355,
62
- "rewards/chosen": 1.3138540452928282e-05,
63
- "rewards/margins": -0.00020548875909298658,
64
- "rewards/rejected": 0.00021862727589905262,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
- "logits/chosen": -2.0177221298217773,
71
- "logits/rejected": -2.008965492248535,
72
- "logps/chosen": -32.57322311401367,
73
- "logps/rejected": -32.500308990478516,
74
- "loss": 1.0001,
75
- "rewards/accuracies": 0.4749999940395355,
76
- "rewards/chosen": 3.584356090868823e-05,
77
- "rewards/margins": -8.777440234553069e-05,
78
- "rewards/rejected": 0.00012361796689219773,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
- "logits/chosen": -1.8622690439224243,
85
- "logits/rejected": -1.851509690284729,
86
- "logps/chosen": -33.547603607177734,
87
- "logps/rejected": -35.463592529296875,
88
- "loss": 0.9998,
89
- "rewards/accuracies": 0.5,
90
- "rewards/chosen": 9.876764670480043e-05,
91
- "rewards/margins": 0.0002112251240760088,
92
- "rewards/rejected": -0.00011245747737120837,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
- "logits/chosen": -1.9400131702423096,
99
- "logits/rejected": -1.9419806003570557,
100
- "logps/chosen": -32.52842330932617,
101
- "logps/rejected": -33.22877883911133,
102
- "loss": 0.9987,
103
- "rewards/accuracies": 0.6499999761581421,
104
- "rewards/chosen": 0.0007175664068199694,
105
- "rewards/margins": 0.0012915965635329485,
106
- "rewards/rejected": -0.00057403021492064,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
- "logits/chosen": -2.070552349090576,
113
- "logits/rejected": -2.0755274295806885,
114
- "logps/chosen": -34.00461959838867,
115
- "logps/rejected": -36.64922332763672,
116
- "loss": 0.9994,
117
- "rewards/accuracies": 0.612500011920929,
118
- "rewards/chosen": -0.00016131921438500285,
119
- "rewards/margins": 0.0006422021542675793,
120
- "rewards/rejected": -0.0008035213686525822,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
- "logits/chosen": -1.9306777715682983,
127
- "logits/rejected": -1.9338254928588867,
128
- "logps/chosen": -34.32624816894531,
129
- "logps/rejected": -34.661468505859375,
130
- "loss": 0.9983,
131
- "rewards/accuracies": 0.612500011920929,
132
- "rewards/chosen": 0.0010156143689528108,
133
- "rewards/margins": 0.0017062196275219321,
134
- "rewards/rejected": -0.0006906053749844432,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
- "logits/chosen": -1.9389193058013916,
141
- "logits/rejected": -1.9434226751327515,
142
- "logps/chosen": -32.38957214355469,
143
- "logps/rejected": -32.348140716552734,
144
- "loss": 0.9993,
145
- "rewards/accuracies": 0.5874999761581421,
146
- "rewards/chosen": 0.0007785108755342662,
147
- "rewards/margins": 0.0006708315922878683,
148
- "rewards/rejected": 0.00010767912317533046,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
- "logits/chosen": -2.0358777046203613,
155
- "logits/rejected": -2.0339014530181885,
156
- "logps/chosen": -32.13254165649414,
157
- "logps/rejected": -31.29019546508789,
158
- "loss": 0.9987,
159
- "rewards/accuracies": 0.612500011920929,
160
- "rewards/chosen": 0.0010829826351255178,
161
- "rewards/margins": 0.001322855008766055,
162
- "rewards/rejected": -0.0002398724900558591,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
- "eval_logits/chosen": -2.2312774658203125,
168
- "eval_logits/rejected": -2.226422071456909,
169
- "eval_logps/chosen": -34.04991149902344,
170
- "eval_logps/rejected": -37.55283737182617,
171
- "eval_loss": 0.9997907280921936,
172
- "eval_rewards/accuracies": 0.5336378812789917,
173
- "eval_rewards/chosen": -0.0001535558985779062,
174
- "eval_rewards/margins": 0.00020861340453848243,
175
- "eval_rewards/rejected": -0.00036216925946064293,
176
- "eval_runtime": 146.0254,
177
- "eval_samples_per_second": 2.349,
178
- "eval_steps_per_second": 0.294,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
- "logits/chosen": -1.9907060861587524,
185
- "logits/rejected": -1.9883339405059814,
186
- "logps/chosen": -33.13169860839844,
187
- "logps/rejected": -34.033958435058594,
188
- "loss": 0.9988,
189
- "rewards/accuracies": 0.6875,
190
- "rewards/chosen": 0.0011211589444428682,
191
- "rewards/margins": 0.0011670273961499333,
192
- "rewards/rejected": -4.586850991472602e-05,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
- "logits/chosen": -2.002023458480835,
199
- "logits/rejected": -1.993699312210083,
200
- "logps/chosen": -32.341697692871094,
201
- "logps/rejected": -32.16511917114258,
202
- "loss": 0.9989,
203
- "rewards/accuracies": 0.612500011920929,
204
- "rewards/chosen": 0.0010320657165721059,
205
- "rewards/margins": 0.001068194629624486,
206
- "rewards/rejected": -3.612901855376549e-05,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
- "logits/chosen": -2.028505802154541,
213
- "logits/rejected": -2.020526885986328,
214
- "logps/chosen": -30.3519287109375,
215
- "logps/rejected": -32.101314544677734,
216
- "loss": 0.9983,
217
- "rewards/accuracies": 0.612500011920929,
218
- "rewards/chosen": 0.0012378758983686566,
219
- "rewards/margins": 0.001720982021652162,
220
- "rewards/rejected": -0.0004831062688026577,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
- "logits/chosen": -1.9588673114776611,
227
- "logits/rejected": -1.9690834283828735,
228
- "logps/chosen": -31.205490112304688,
229
- "logps/rejected": -32.55961608886719,
230
- "loss": 0.9976,
231
- "rewards/accuracies": 0.6499999761581421,
232
- "rewards/chosen": 0.0021417266689240932,
233
- "rewards/margins": 0.0023890691809356213,
234
- "rewards/rejected": -0.00024734257021918893,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
- "logits/chosen": -1.8695415258407593,
241
- "logits/rejected": -1.8707062005996704,
242
- "logps/chosen": -33.88127899169922,
243
- "logps/rejected": -34.7686653137207,
244
- "loss": 0.9968,
245
- "rewards/accuracies": 0.574999988079071,
246
- "rewards/chosen": 0.0031141345389187336,
247
- "rewards/margins": 0.003228238318115473,
248
- "rewards/rejected": -0.00011410393926780671,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
- "logits/chosen": -1.9212032556533813,
255
- "logits/rejected": -1.9178003072738647,
256
- "logps/chosen": -35.99773406982422,
257
- "logps/rejected": -32.705848693847656,
258
- "loss": 0.9984,
259
- "rewards/accuracies": 0.6499999761581421,
260
- "rewards/chosen": 0.0017394202295690775,
261
- "rewards/margins": 0.0015704210381954908,
262
- "rewards/rejected": 0.00016899927868507802,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
- "logits/chosen": -2.0206995010375977,
269
- "logits/rejected": -2.0133931636810303,
270
- "logps/chosen": -33.504085540771484,
271
- "logps/rejected": -31.432220458984375,
272
- "loss": 0.9963,
273
- "rewards/accuracies": 0.637499988079071,
274
- "rewards/chosen": 0.0029377774335443974,
275
- "rewards/margins": 0.003741443855687976,
276
- "rewards/rejected": -0.0008036663057282567,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
- "logits/chosen": -2.0269291400909424,
283
- "logits/rejected": -2.032160997390747,
284
- "logps/chosen": -32.24355697631836,
285
- "logps/rejected": -32.431182861328125,
286
- "loss": 0.9975,
287
- "rewards/accuracies": 0.625,
288
- "rewards/chosen": 0.003012270200997591,
289
- "rewards/margins": 0.0024727012496441603,
290
- "rewards/rejected": 0.0005395688931457698,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
- "logits/chosen": -2.027367115020752,
297
- "logits/rejected": -2.0246078968048096,
298
- "logps/chosen": -31.290613174438477,
299
- "logps/rejected": -31.361133575439453,
300
- "loss": 0.9976,
301
- "rewards/accuracies": 0.625,
302
- "rewards/chosen": 0.0019833946134895086,
303
- "rewards/margins": 0.002412599278613925,
304
- "rewards/rejected": -0.00042920451960526407,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
- "logits/chosen": -1.8985168933868408,
311
- "logits/rejected": -1.903148889541626,
312
- "logps/chosen": -31.30405616760254,
313
- "logps/rejected": -32.838443756103516,
314
- "loss": 0.9965,
315
  "rewards/accuracies": 0.7250000238418579,
316
- "rewards/chosen": 0.0030222723726183176,
317
- "rewards/margins": 0.0034973658621311188,
318
- "rewards/rejected": -0.0004750936641357839,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
- "eval_logits/chosen": -2.225177764892578,
324
- "eval_logits/rejected": -2.2203547954559326,
325
- "eval_logps/chosen": -34.06184387207031,
326
- "eval_logps/rejected": -37.579010009765625,
327
- "eval_loss": 0.9996482133865356,
328
- "eval_rewards/accuracies": 0.5070598125457764,
329
- "eval_rewards/chosen": -0.0002729461120907217,
330
- "eval_rewards/margins": 0.00035095339990220964,
331
- "eval_rewards/rejected": -0.0006238995119929314,
332
- "eval_runtime": 145.704,
333
- "eval_samples_per_second": 2.354,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
- "logits/chosen": -2.011120557785034,
341
- "logits/rejected": -2.021751880645752,
342
- "logps/chosen": -31.745685577392578,
343
- "logps/rejected": -33.96772003173828,
344
- "loss": 0.9967,
345
- "rewards/accuracies": 0.637499988079071,
346
- "rewards/chosen": 0.0022561827208846807,
347
- "rewards/margins": 0.003306365106254816,
348
- "rewards/rejected": -0.0010501822689548135,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
- "logits/chosen": -1.903857946395874,
355
- "logits/rejected": -1.918621301651001,
356
- "logps/chosen": -29.797290802001953,
357
- "logps/rejected": -31.628814697265625,
358
- "loss": 0.9962,
359
- "rewards/accuracies": 0.7124999761581421,
360
- "rewards/chosen": 0.0031425058841705322,
361
- "rewards/margins": 0.0038488968275487423,
362
- "rewards/rejected": -0.0007063907687552273,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
- "logits/chosen": -1.9593979120254517,
369
- "logits/rejected": -1.9633464813232422,
370
- "logps/chosen": -33.067623138427734,
371
- "logps/rejected": -31.64206886291504,
372
- "loss": 0.9956,
373
- "rewards/accuracies": 0.675000011920929,
374
- "rewards/chosen": 0.003498472273349762,
375
- "rewards/margins": 0.004426136147230864,
376
- "rewards/rejected": -0.0009276636992581189,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
- "logits/chosen": -1.9572632312774658,
383
- "logits/rejected": -1.9354908466339111,
384
- "logps/chosen": -33.843727111816406,
385
- "logps/rejected": -35.1453742980957,
386
- "loss": 0.995,
387
  "rewards/accuracies": 0.699999988079071,
388
- "rewards/chosen": 0.0031002266332507133,
389
- "rewards/margins": 0.0050460235215723515,
390
- "rewards/rejected": -0.0019457967719063163,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
- "logits/chosen": -1.9997854232788086,
397
- "logits/rejected": -1.9964803457260132,
398
- "logps/chosen": -32.75019454956055,
399
- "logps/rejected": -36.28661346435547,
400
- "loss": 0.9976,
401
- "rewards/accuracies": 0.637499988079071,
402
- "rewards/chosen": 0.0017487213481217623,
403
- "rewards/margins": 0.0023655896075069904,
404
- "rewards/rejected": -0.0006168682011775672,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
- "logits/chosen": -1.8673791885375977,
411
- "logits/rejected": -1.8649587631225586,
412
- "logps/chosen": -34.018226623535156,
413
- "logps/rejected": -35.539276123046875,
414
- "loss": 0.998,
415
- "rewards/accuracies": 0.612500011920929,
416
- "rewards/chosen": 0.001701725646853447,
417
- "rewards/margins": 0.002048287307843566,
418
- "rewards/rejected": -0.0003465614281594753,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
- "logits/chosen": -1.8522275686264038,
425
- "logits/rejected": -1.849872350692749,
426
- "logps/chosen": -34.16339874267578,
427
- "logps/rejected": -31.845317840576172,
428
- "loss": 0.9969,
429
- "rewards/accuracies": 0.612500011920929,
430
- "rewards/chosen": 0.0023420110810548067,
431
- "rewards/margins": 0.0030819105450063944,
432
- "rewards/rejected": -0.0007398994639515877,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
- "logits/chosen": -1.9549518823623657,
439
- "logits/rejected": -1.94447922706604,
440
- "logps/chosen": -35.027687072753906,
441
- "logps/rejected": -31.895471572875977,
442
- "loss": 0.9962,
443
- "rewards/accuracies": 0.699999988079071,
444
- "rewards/chosen": 0.003289591521024704,
445
- "rewards/margins": 0.003834384260699153,
446
- "rewards/rejected": -0.0005447928560897708,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
- "logits/chosen": -2.0498766899108887,
453
- "logits/rejected": -2.034980297088623,
454
- "logps/chosen": -30.72440528869629,
455
- "logps/rejected": -32.658695220947266,
456
- "loss": 0.9979,
457
- "rewards/accuracies": 0.637499988079071,
458
- "rewards/chosen": 0.0019971781875938177,
459
- "rewards/margins": 0.002065772656351328,
460
- "rewards/rejected": -6.859400309622288e-05,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
- "logits/chosen": -1.9201946258544922,
467
- "logits/rejected": -1.9177051782608032,
468
- "logps/chosen": -32.3183479309082,
469
- "logps/rejected": -30.95510482788086,
470
- "loss": 0.9925,
471
- "rewards/accuracies": 0.7749999761581421,
472
- "rewards/chosen": 0.006087628658860922,
473
- "rewards/margins": 0.007470599375665188,
474
- "rewards/rejected": -0.0013829706003889441,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
- "eval_logits/chosen": -2.2213804721832275,
480
- "eval_logits/rejected": -2.216555118560791,
481
- "eval_logps/chosen": -34.083614349365234,
482
- "eval_logps/rejected": -37.60634994506836,
483
- "eval_loss": 0.999591052532196,
484
- "eval_rewards/accuracies": 0.559385359287262,
485
- "eval_rewards/chosen": -0.0004906260874122381,
486
- "eval_rewards/margins": 0.00040669209556654096,
487
- "eval_rewards/rejected": -0.0008973181829787791,
488
- "eval_runtime": 145.8707,
489
- "eval_samples_per_second": 2.351,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
- "logits/chosen": -1.9060642719268799,
497
- "logits/rejected": -1.9028133153915405,
498
- "logps/chosen": -31.319162368774414,
499
- "logps/rejected": -33.85043716430664,
500
- "loss": 0.9961,
501
- "rewards/accuracies": 0.737500011920929,
502
- "rewards/chosen": 0.002841859357431531,
503
- "rewards/margins": 0.003924719989299774,
504
- "rewards/rejected": -0.0010828599333763123,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
- "logits/chosen": -1.9553836584091187,
511
- "logits/rejected": -1.9432109594345093,
512
- "logps/chosen": -34.27588653564453,
513
- "logps/rejected": -33.672359466552734,
514
- "loss": 0.9955,
515
- "rewards/accuracies": 0.699999988079071,
516
- "rewards/chosen": 0.0032019000500440598,
517
- "rewards/margins": 0.004537059459835291,
518
- "rewards/rejected": -0.0013351596426218748,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
- "logits/chosen": -1.9905990362167358,
525
- "logits/rejected": -1.9891618490219116,
526
- "logps/chosen": -33.116233825683594,
527
- "logps/rejected": -32.55724334716797,
528
- "loss": 0.9955,
529
- "rewards/accuracies": 0.737500011920929,
530
- "rewards/chosen": 0.0036955769173800945,
531
- "rewards/margins": 0.004472161643207073,
532
- "rewards/rejected": -0.0007765850750729442,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
- "logits/chosen": -2.0769362449645996,
539
- "logits/rejected": -2.0613036155700684,
540
- "logps/chosen": -33.791297912597656,
541
- "logps/rejected": -33.12422180175781,
542
- "loss": 0.9961,
543
- "rewards/accuracies": 0.699999988079071,
544
- "rewards/chosen": 0.003880967851728201,
545
- "rewards/margins": 0.00394281093031168,
546
- "rewards/rejected": -6.184288213262334e-05,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
- "logits/chosen": -1.950060248374939,
553
- "logits/rejected": -1.9492241144180298,
554
- "logps/chosen": -32.82404327392578,
555
- "logps/rejected": -32.50709915161133,
556
- "loss": 0.995,
557
- "rewards/accuracies": 0.7124999761581421,
558
- "rewards/chosen": 0.004580510314553976,
559
- "rewards/margins": 0.005000022705644369,
560
- "rewards/rejected": -0.000419511750806123,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
- "logits/chosen": -1.9050449132919312,
567
- "logits/rejected": -1.915305733680725,
568
- "logps/chosen": -31.87860679626465,
569
- "logps/rejected": -35.34981155395508,
570
- "loss": 0.9961,
571
- "rewards/accuracies": 0.6875,
572
- "rewards/chosen": 0.0032608420588076115,
573
- "rewards/margins": 0.0038713677786290646,
574
- "rewards/rejected": -0.0006105261854827404,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
- "logits/chosen": -2.04546856880188,
581
- "logits/rejected": -2.039043426513672,
582
- "logps/chosen": -33.336219787597656,
583
- "logps/rejected": -29.269311904907227,
584
- "loss": 0.9964,
585
- "rewards/accuracies": 0.6625000238418579,
586
- "rewards/chosen": 0.0031574335880577564,
587
- "rewards/margins": 0.003586276201531291,
588
- "rewards/rejected": -0.0004288425261620432,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
- "logits/chosen": -1.905160665512085,
595
- "logits/rejected": -1.907360315322876,
596
- "logps/chosen": -33.86741256713867,
597
- "logps/rejected": -30.982807159423828,
598
- "loss": 0.9952,
599
- "rewards/accuracies": 0.7250000238418579,
600
- "rewards/chosen": 0.0037163912784308195,
601
- "rewards/margins": 0.004818186163902283,
602
- "rewards/rejected": -0.0011017953511327505,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
- "train_loss": 0.9973225085766284,
610
- "train_runtime": 3253.1307,
611
- "train_samples_per_second": 0.946,
612
  "train_steps_per_second": 0.118
613
  }
614
  ],
 
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
+ "loss": 0.6931,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
 
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
+ "logits/chosen": -1.866548776626587,
29
+ "logits/rejected": -1.870866298675537,
30
+ "logps/chosen": -36.98152160644531,
31
+ "logps/rejected": -33.65525436401367,
32
+ "loss": 0.6907,
33
+ "rewards/accuracies": 0.5833333134651184,
34
+ "rewards/chosen": 0.0025075911544263363,
35
+ "rewards/margins": 0.005025296472012997,
36
+ "rewards/rejected": -0.002517705550417304,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
+ "logits/chosen": -1.9975509643554688,
43
+ "logits/rejected": -2.000202178955078,
44
+ "logps/chosen": -29.649311065673828,
45
+ "logps/rejected": -29.060571670532227,
46
+ "loss": 0.6937,
47
+ "rewards/accuracies": 0.42500001192092896,
48
+ "rewards/chosen": -0.000711599481292069,
49
+ "rewards/margins": -0.0009990204125642776,
50
+ "rewards/rejected": 0.00028742075664922595,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
+ "logits/chosen": -1.9204410314559937,
57
+ "logits/rejected": -1.9177614450454712,
58
+ "logps/chosen": -31.389001846313477,
59
+ "logps/rejected": -33.2293701171875,
60
+ "loss": 0.6913,
61
+ "rewards/accuracies": 0.6000000238418579,
62
+ "rewards/chosen": 0.0027091833762824535,
63
+ "rewards/margins": 0.0038000899367034435,
64
+ "rewards/rejected": -0.0010909067932516336,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
+ "logits/chosen": -2.0175061225891113,
71
+ "logits/rejected": -2.0087640285491943,
72
+ "logps/chosen": -32.5477180480957,
73
+ "logps/rejected": -32.5131950378418,
74
+ "loss": 0.6917,
75
+ "rewards/accuracies": 0.574999988079071,
76
+ "rewards/chosen": 0.002909052651375532,
77
+ "rewards/margins": 0.0029616341926157475,
78
+ "rewards/rejected": -5.258135570329614e-05,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
+ "logits/chosen": -1.862919569015503,
85
+ "logits/rejected": -1.8521493673324585,
86
+ "logps/chosen": -33.53850555419922,
87
+ "logps/rejected": -35.44303512573242,
88
+ "loss": 0.6927,
89
+ "rewards/accuracies": 0.512499988079071,
90
+ "rewards/chosen": 0.001897345995530486,
91
+ "rewards/margins": 0.0009661337244324386,
92
+ "rewards/rejected": 0.0009312118636444211,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
+ "logits/chosen": -1.941716194152832,
99
+ "logits/rejected": -1.943652868270874,
100
+ "logps/chosen": -32.55483627319336,
101
+ "logps/rejected": -33.212310791015625,
102
+ "loss": 0.6891,
103
+ "rewards/accuracies": 0.5625,
104
+ "rewards/chosen": 0.004534821957349777,
105
+ "rewards/margins": 0.008627313189208508,
106
+ "rewards/rejected": -0.004092490766197443,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
+ "logits/chosen": -2.072801113128662,
113
+ "logits/rejected": -2.0777623653411865,
114
+ "logps/chosen": -33.98583221435547,
115
+ "logps/rejected": -36.61463165283203,
116
+ "loss": 0.6909,
117
+ "rewards/accuracies": 0.5,
118
+ "rewards/chosen": 0.0002649508533068001,
119
+ "rewards/margins": 0.0048407576978206635,
120
+ "rewards/rejected": -0.004575806204229593,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
+ "logits/chosen": -1.933315634727478,
127
+ "logits/rejected": -1.9364687204360962,
128
+ "logps/chosen": -34.30742645263672,
129
+ "logps/rejected": -34.65543746948242,
130
+ "loss": 0.6843,
131
+ "rewards/accuracies": 0.6000000238418579,
132
+ "rewards/chosen": 0.01203834917396307,
133
+ "rewards/margins": 0.0183414313942194,
134
+ "rewards/rejected": -0.006303082220256329,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
+ "logits/chosen": -1.9412227869033813,
141
+ "logits/rejected": -1.9457390308380127,
142
+ "logps/chosen": -32.38041305541992,
143
+ "logps/rejected": -32.36061096191406,
144
+ "loss": 0.6889,
145
+ "rewards/accuracies": 0.625,
146
+ "rewards/chosen": 0.008700890466570854,
147
+ "rewards/margins": 0.0088711092248559,
148
+ "rewards/rejected": -0.0001702193112578243,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
+ "logits/chosen": -2.038828134536743,
155
+ "logits/rejected": -2.0368473529815674,
156
+ "logps/chosen": -32.14335250854492,
157
+ "logps/rejected": -31.2996768951416,
158
+ "loss": 0.6868,
159
+ "rewards/accuracies": 0.574999988079071,
160
+ "rewards/chosen": 0.009748170152306557,
161
+ "rewards/margins": 0.013094509951770306,
162
+ "rewards/rejected": -0.0033463402651250362,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
+ "eval_logits/chosen": -2.2332358360290527,
168
+ "eval_logits/rejected": -2.228397846221924,
169
+ "eval_logps/chosen": -34.052730560302734,
170
+ "eval_logps/rejected": -37.55271911621094,
171
+ "eval_loss": 0.692415714263916,
172
+ "eval_rewards/accuracies": 0.5282392501831055,
173
+ "eval_rewards/chosen": -0.0018174793804064393,
174
+ "eval_rewards/margins": 0.0017920632380992174,
175
+ "eval_rewards/rejected": -0.0036095422692596912,
176
+ "eval_runtime": 145.8681,
177
+ "eval_samples_per_second": 2.351,
178
+ "eval_steps_per_second": 0.295,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
+ "logits/chosen": -1.9926027059555054,
185
+ "logits/rejected": -1.9902327060699463,
186
+ "logps/chosen": -33.1365852355957,
187
+ "logps/rejected": -34.04566192626953,
188
+ "loss": 0.6877,
189
+ "rewards/accuracies": 0.625,
190
+ "rewards/chosen": 0.010723221115767956,
191
+ "rewards/margins": 0.012352446094155312,
192
+ "rewards/rejected": -0.0016292240470647812,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
+ "logits/chosen": -2.0035624504089355,
199
+ "logits/rejected": -1.995218276977539,
200
+ "logps/chosen": -32.34514617919922,
201
+ "logps/rejected": -32.111610412597656,
202
+ "loss": 0.6909,
203
+ "rewards/accuracies": 0.512499988079071,
204
+ "rewards/chosen": 0.009975815191864967,
205
+ "rewards/margins": 0.004986769054085016,
206
+ "rewards/rejected": 0.004989045672118664,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
+ "logits/chosen": -2.0313966274261475,
213
+ "logits/rejected": -2.0234367847442627,
214
+ "logps/chosen": -30.32912254333496,
215
+ "logps/rejected": -32.07999801635742,
216
+ "loss": 0.6851,
217
+ "rewards/accuracies": 0.625,
218
+ "rewards/chosen": 0.014659431762993336,
219
+ "rewards/margins": 0.017358481884002686,
220
+ "rewards/rejected": -0.0026990489568561316,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
+ "logits/chosen": -1.9624744653701782,
227
+ "logits/rejected": -1.9726974964141846,
228
+ "logps/chosen": -31.2264347076416,
229
+ "logps/rejected": -32.56539535522461,
230
+ "loss": 0.6823,
231
+ "rewards/accuracies": 0.7250000238418579,
232
+ "rewards/chosen": 0.019322697073221207,
233
+ "rewards/margins": 0.02237415872514248,
234
+ "rewards/rejected": -0.0030514628160744905,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
+ "logits/chosen": -1.8730823993682861,
241
+ "logits/rejected": -1.8742510080337524,
242
+ "logps/chosen": -33.906436920166016,
243
+ "logps/rejected": -34.79541778564453,
244
+ "loss": 0.6777,
245
+ "rewards/accuracies": 0.5625,
246
+ "rewards/chosen": 0.028625473380088806,
247
+ "rewards/margins": 0.03244180604815483,
248
+ "rewards/rejected": -0.0038163296412676573,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
+ "logits/chosen": -1.924594521522522,
255
+ "logits/rejected": -1.9211914539337158,
256
+ "logps/chosen": -35.99129867553711,
257
+ "logps/rejected": -32.720947265625,
258
+ "loss": 0.6845,
259
+ "rewards/accuracies": 0.675000011920929,
260
+ "rewards/chosen": 0.0180378220975399,
261
+ "rewards/margins": 0.01785714365541935,
262
+ "rewards/rejected": 0.0001806807704269886,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
+ "logits/chosen": -2.025442600250244,
269
+ "logits/rejected": -2.0181422233581543,
270
+ "logps/chosen": -33.491790771484375,
271
+ "logps/rejected": -31.42473793029785,
272
+ "loss": 0.6749,
273
+ "rewards/accuracies": 0.699999988079071,
274
+ "rewards/chosen": 0.030607115477323532,
275
+ "rewards/margins": 0.03789608180522919,
276
+ "rewards/rejected": -0.007288967724889517,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
+ "logits/chosen": -2.0314249992370605,
283
+ "logits/rejected": -2.0366628170013428,
284
+ "logps/chosen": -32.224815368652344,
285
+ "logps/rejected": -32.438621520996094,
286
+ "loss": 0.68,
287
+ "rewards/accuracies": 0.675000011920929,
288
+ "rewards/chosen": 0.03199741616845131,
289
+ "rewards/margins": 0.02734515443444252,
290
+ "rewards/rejected": 0.004652261734008789,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
+ "logits/chosen": -2.032400131225586,
297
+ "logits/rejected": -2.0296273231506348,
298
+ "logps/chosen": -31.274377822875977,
299
+ "logps/rejected": -31.340478897094727,
300
+ "loss": 0.6818,
301
+ "rewards/accuracies": 0.6499999761581421,
302
+ "rewards/chosen": 0.021457459777593613,
303
+ "rewards/margins": 0.023684043437242508,
304
+ "rewards/rejected": -0.002226583892479539,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
+ "logits/chosen": -1.902413010597229,
311
+ "logits/rejected": -1.9070699214935303,
312
+ "logps/chosen": -31.3071231842041,
313
+ "logps/rejected": -32.819236755371094,
314
+ "loss": 0.6774,
315
  "rewards/accuracies": 0.7250000238418579,
316
+ "rewards/chosen": 0.029915904626250267,
317
+ "rewards/margins": 0.03274631127715111,
318
+ "rewards/rejected": -0.0028304127044975758,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
+ "eval_logits/chosen": -2.2295496463775635,
324
+ "eval_logits/rejected": -2.224731206893921,
325
+ "eval_logps/chosen": -34.047569274902344,
326
+ "eval_logps/rejected": -37.568965911865234,
327
+ "eval_loss": 0.6914328336715698,
328
+ "eval_rewards/accuracies": 0.5510797500610352,
329
+ "eval_rewards/chosen": -0.0013017345918342471,
330
+ "eval_rewards/margins": 0.003932718187570572,
331
+ "eval_rewards/rejected": -0.005234452895820141,
332
+ "eval_runtime": 145.6523,
333
+ "eval_samples_per_second": 2.355,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
+ "logits/chosen": -2.0154383182525635,
341
+ "logits/rejected": -2.0260632038116455,
342
+ "logps/chosen": -31.753280639648438,
343
+ "logps/rejected": -33.95705032348633,
344
+ "loss": 0.6781,
345
+ "rewards/accuracies": 0.6000000238418579,
346
+ "rewards/chosen": 0.021802183240652084,
347
+ "rewards/margins": 0.031237006187438965,
348
+ "rewards/rejected": -0.009434822015464306,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
+ "logits/chosen": -1.9075886011123657,
355
+ "logits/rejected": -1.9223048686981201,
356
+ "logps/chosen": -29.80865478515625,
357
+ "logps/rejected": -31.63385009765625,
358
+ "loss": 0.675,
359
+ "rewards/accuracies": 0.75,
360
+ "rewards/chosen": 0.03028874099254608,
361
+ "rewards/margins": 0.037856362760066986,
362
+ "rewards/rejected": -0.007567620370537043,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
+ "logits/chosen": -1.9642608165740967,
369
+ "logits/rejected": -1.9682174921035767,
370
+ "logps/chosen": -33.096744537353516,
371
+ "logps/rejected": -31.623376846313477,
372
+ "loss": 0.6746,
373
+ "rewards/accuracies": 0.6625000238418579,
374
+ "rewards/chosen": 0.032072193920612335,
375
+ "rewards/margins": 0.03947969526052475,
376
+ "rewards/rejected": -0.0074075027368962765,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
+ "logits/chosen": -1.962222695350647,
383
+ "logits/rejected": -1.9404484033584595,
384
+ "logps/chosen": -33.80894088745117,
385
+ "logps/rejected": -35.11548614501953,
386
+ "loss": 0.6688,
387
  "rewards/accuracies": 0.699999988079071,
388
+ "rewards/chosen": 0.034481119364500046,
389
+ "rewards/margins": 0.050950419157743454,
390
+ "rewards/rejected": -0.016469307243824005,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
+ "logits/chosen": -2.0041451454162598,
397
+ "logits/rejected": -2.0008480548858643,
398
+ "logps/chosen": -32.71742630004883,
399
+ "logps/rejected": -36.27611541748047,
400
+ "loss": 0.6808,
401
+ "rewards/accuracies": 0.612500011920929,
402
+ "rewards/chosen": 0.020764300599694252,
403
+ "rewards/margins": 0.02588369883596897,
404
+ "rewards/rejected": -0.005119399167597294,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
+ "logits/chosen": -1.871769666671753,
411
+ "logits/rejected": -1.8693596124649048,
412
+ "logps/chosen": -33.992244720458984,
413
+ "logps/rejected": -35.55255126953125,
414
+ "loss": 0.6816,
415
+ "rewards/accuracies": 0.675000011920929,
416
+ "rewards/chosen": 0.019615134224295616,
417
+ "rewards/margins": 0.024408066645264626,
418
+ "rewards/rejected": -0.004792929161339998,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
+ "logits/chosen": -1.8571555614471436,
425
+ "logits/rejected": -1.8547788858413696,
426
+ "logps/chosen": -34.180908203125,
427
+ "logps/rejected": -31.8300838470459,
428
+ "loss": 0.6802,
429
+ "rewards/accuracies": 0.574999988079071,
430
+ "rewards/chosen": 0.021669462323188782,
431
+ "rewards/margins": 0.027544772252440453,
432
+ "rewards/rejected": -0.00587531179189682,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
+ "logits/chosen": -1.9601259231567383,
439
+ "logits/rejected": -1.949663519859314,
440
+ "logps/chosen": -34.989627838134766,
441
+ "logps/rejected": -31.897205352783203,
442
+ "loss": 0.6728,
443
+ "rewards/accuracies": 0.675000011920929,
444
+ "rewards/chosen": 0.03670142963528633,
445
+ "rewards/margins": 0.042322538793087006,
446
+ "rewards/rejected": -0.005621104035526514,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
+ "logits/chosen": -2.054929494857788,
453
+ "logits/rejected": -2.040048122406006,
454
+ "logps/chosen": -30.71946144104004,
455
+ "logps/rejected": -32.615081787109375,
456
+ "loss": 0.6854,
457
+ "rewards/accuracies": 0.625,
458
+ "rewards/chosen": 0.020466715097427368,
459
+ "rewards/margins": 0.016791274771094322,
460
+ "rewards/rejected": 0.0036754377651959658,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
+ "logits/chosen": -1.9249824285507202,
467
+ "logits/rejected": -1.9225151538848877,
468
+ "logps/chosen": -32.352317810058594,
469
+ "logps/rejected": -30.916645050048828,
470
+ "loss": 0.6615,
471
+ "rewards/accuracies": 0.7250000238418579,
472
+ "rewards/chosen": 0.05747910216450691,
473
+ "rewards/margins": 0.06746286153793335,
474
+ "rewards/rejected": -0.009983762167394161,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
+ "eval_logits/chosen": -2.2262814044952393,
480
+ "eval_logits/rejected": -2.221453905105591,
481
+ "eval_logps/chosen": -34.07697677612305,
482
+ "eval_logps/rejected": -37.600772857666016,
483
+ "eval_loss": 0.6913371086120605,
484
+ "eval_rewards/accuracies": 0.5477575063705444,
485
+ "eval_rewards/chosen": -0.004243030212819576,
486
+ "eval_rewards/margins": 0.004172220826148987,
487
+ "eval_rewards/rejected": -0.008415251038968563,
488
+ "eval_runtime": 145.7082,
489
+ "eval_samples_per_second": 2.354,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
+ "logits/chosen": -1.9102771282196045,
497
+ "logits/rejected": -1.9070390462875366,
498
+ "logps/chosen": -31.315908432006836,
499
+ "logps/rejected": -33.82908248901367,
500
+ "loss": 0.6755,
501
+ "rewards/accuracies": 0.7124999761581421,
502
+ "rewards/chosen": 0.02874402143061161,
503
+ "rewards/margins": 0.037437256425619125,
504
+ "rewards/rejected": -0.008693234995007515,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
+ "logits/chosen": -1.960473656654358,
511
+ "logits/rejected": -1.9483349323272705,
512
+ "logps/chosen": -34.295162200927734,
513
+ "logps/rejected": -33.6617431640625,
514
+ "loss": 0.6729,
515
+ "rewards/accuracies": 0.6499999761581421,
516
+ "rewards/chosen": 0.03009115532040596,
517
+ "rewards/margins": 0.04238158464431763,
518
+ "rewards/rejected": -0.012290433049201965,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
+ "logits/chosen": -1.9958908557891846,
525
+ "logits/rejected": -1.994442343711853,
526
+ "logps/chosen": -33.12481689453125,
527
+ "logps/rejected": -32.55192184448242,
528
+ "loss": 0.6726,
529
+ "rewards/accuracies": 0.699999988079071,
530
+ "rewards/chosen": 0.036097489297389984,
531
+ "rewards/margins": 0.04333154112100601,
532
+ "rewards/rejected": -0.007234054151922464,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
+ "logits/chosen": -2.0822036266326904,
539
+ "logits/rejected": -2.06657075881958,
540
+ "logps/chosen": -33.78117752075195,
541
+ "logps/rejected": -33.087318420410156,
542
+ "loss": 0.6755,
543
+ "rewards/accuracies": 0.675000011920929,
544
+ "rewards/chosen": 0.03982168063521385,
545
+ "rewards/margins": 0.036749765276908875,
546
+ "rewards/rejected": 0.0030719186179339886,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
+ "logits/chosen": -1.9546077251434326,
553
+ "logits/rejected": -1.9537696838378906,
554
+ "logps/chosen": -32.80642318725586,
555
+ "logps/rejected": -32.51500701904297,
556
+ "loss": 0.6684,
557
+ "rewards/accuracies": 0.7250000238418579,
558
+ "rewards/chosen": 0.04756731912493706,
559
+ "rewards/margins": 0.05255315452814102,
560
+ "rewards/rejected": -0.004985834006220102,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
+ "logits/chosen": -1.9106451272964478,
567
+ "logits/rejected": -1.9209150075912476,
568
+ "logps/chosen": -31.8604736328125,
569
+ "logps/rejected": -35.30449676513672,
570
+ "loss": 0.676,
571
+ "rewards/accuracies": 0.637499988079071,
572
+ "rewards/chosen": 0.034421421587467194,
573
+ "rewards/margins": 0.03599538281559944,
574
+ "rewards/rejected": -0.0015739675145596266,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
+ "logits/chosen": -2.051037311553955,
581
+ "logits/rejected": -2.044602870941162,
582
+ "logps/chosen": -33.32126235961914,
583
+ "logps/rejected": -29.267892837524414,
584
+ "loss": 0.6752,
585
+ "rewards/accuracies": 0.737500011920929,
586
+ "rewards/chosen": 0.033070918172597885,
587
+ "rewards/margins": 0.03721793740987778,
588
+ "rewards/rejected": -0.004147022031247616,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
+ "logits/chosen": -1.9098504781723022,
595
+ "logits/rejected": -1.9120371341705322,
596
+ "logps/chosen": -33.857635498046875,
597
+ "logps/rejected": -30.970882415771484,
598
+ "loss": 0.6702,
599
+ "rewards/accuracies": 0.762499988079071,
600
+ "rewards/chosen": 0.03814157843589783,
601
+ "rewards/margins": 0.04796718806028366,
602
+ "rewards/rejected": -0.009825612418353558,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
+ "train_loss": 0.6806787961489195,
610
+ "train_runtime": 3251.1509,
611
+ "train_samples_per_second": 0.947,
612
  "train_steps_per_second": 0.118
613
  }
614
  ],
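
One note on the new `loss` column in `trainer_state.json`: it starts at 0.6931, which is ln 2, the value TRL's default sigmoid DPO objective takes when the chosen and rejected rewards are equal, as in the first logged step where both rewards are 0.0. Assuming that default loss (consistent with the `dpo` tag on the card), the relation is:

```latex
% DPO sigmoid loss in terms of the logged implicit rewards r_w (chosen) and r_l (rejected)
\mathcal{L}_{\mathrm{DPO}} = -\log \sigma\!\left(r_w - r_l\right),
\qquad r_w = r_l \;\Rightarrow\; \mathcal{L}_{\mathrm{DPO}} = -\log \sigma(0) = \log 2 \approx 0.6931
```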