hugodk-sch committed on
Commit
470a703
1 Parent(s): f1cd9f7

Model save

Files changed (5)
  1. README.md +13 -16
  2. adapter_model.safetensors +1 -1
  3. all_results.json +3 -16
  4. train_results.json +3 -3
  5. trainer_state.json +374 -374
README.md CHANGED
@@ -1,13 +1,10 @@
  ---
  library_name: peft
  tags:
- - alignment-handbook
  - trl
  - dpo
  - generated_from_trainer
  base_model: NbAiLab/nb-gpt-j-6B-v2
- datasets:
- - hugodk-sch/aftonposten_title_prefs
  model-index:
  - name: aftonposten-6b-align-scan
  results: []
@@ -18,17 +15,17 @@ should probably proofread and complete it, then remove this comment. -->

  # aftonposten-6b-align-scan

- This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
+ This model is a fine-tuned version of [NbAiLab/nb-gpt-j-6B-v2](https://huggingface.co/NbAiLab/nb-gpt-j-6B-v2) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.6903
- - Rewards/chosen: -0.0025
- - Rewards/rejected: -0.0237
- - Rewards/accuracies: 0.5519
- - Rewards/margins: 0.0212
- - Logps/rejected: -37.5562
- - Logps/chosen: -34.0388
- - Logits/rejected: -2.2221
- - Logits/chosen: -2.2270
+ - Loss: 0.4937
+ - Rewards/chosen: 0.1639
+ - Rewards/rejected: 0.1391
+ - Rewards/accuracies: 0.5361
+ - Rewards/margins: 0.0248
+ - Logps/rejected: -37.2848
+ - Logps/chosen: -33.7614
+ - Logits/rejected: -2.2261
+ - Logits/chosen: -2.2309

  ## Model description

@@ -63,9 +60,9 @@ The following hyperparameters were used during training:

  | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
  |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
- | 0.6595 | 0.26 | 100 | 0.6900 | 0.0170 | 0.0010 | 0.5253 | 0.0160 | -37.5150 | -34.0063 | -2.2287 | -2.2336 |
- | 0.6189 | 0.52 | 200 | 0.6895 | 0.0107 | -0.0106 | 0.5341 | 0.0213 | -37.5344 | -34.0167 | -2.2259 | -2.2307 |
- | 0.5582 | 0.78 | 300 | 0.6872 | -0.0004 | -0.0276 | 0.5598 | 0.0272 | -37.5627 | -34.0353 | -2.2221 | -2.2269 |
+ | 0.4799 | 0.26 | 100 | 0.4987 | 0.0790 | 0.0734 | 0.5104 | 0.0056 | -37.3943 | -33.9029 | -2.2301 | -2.2349 |
+ | 0.4548 | 0.52 | 200 | 0.4956 | 0.1590 | 0.1392 | 0.5341 | 0.0198 | -37.2846 | -33.7696 | -2.2287 | -2.2335 |
+ | 0.41 | 0.78 | 300 | 0.4937 | 0.1639 | 0.1391 | 0.5361 | 0.0248 | -37.2848 | -33.7614 | -2.2261 | -2.2309 |


  ### Framework versions
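
The card above describes a PEFT (LoRA-style) adapter trained with TRL's DPO trainer on top of NbAiLab/nb-gpt-j-6B-v2. As a minimal sketch (not part of this commit), this is how the saved adapter_model.safetensors would typically be attached to that base model; the adapter hub id "hugodk-sch/aftonposten-6b-align-scan" and the prompt are assumptions inferred from the committer and model name, not confirmed by the diff.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "NbAiLab/nb-gpt-j-6B-v2"                    # base model named in the card
adapter_id = "hugodk-sch/aftonposten-6b-align-scan"   # assumed location of this adapter repo

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id)   # 6B params; load in low precision / on GPU in practice
model = PeftModel.from_pretrained(base_model, adapter_id)    # attaches adapter_model.safetensors

inputs = tokenizer("Skriv en tittel: ", return_tensors="pt")  # hypothetical prompt
output = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```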
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:551da0818c9710df6bd28d0096381ec12741c94227c876abb8c993660ecfeae2
+ oid sha256:5f4af652c4082ace59d6cfd645e0b1edcf903193c93e0147e4ef37697cdaee0f
  size 176183216
all_results.json CHANGED
@@ -1,21 +1,8 @@
  {
  "epoch": 1.0,
- "eval_logits/chosen": -2.226956844329834,
- "eval_logits/rejected": -2.2221333980560303,
- "eval_logps/chosen": -34.03876876831055,
- "eval_logps/rejected": -37.55620193481445,
- "eval_loss": 0.6902801394462585,
- "eval_rewards/accuracies": 0.5519103407859802,
- "eval_rewards/chosen": -0.002528026234358549,
- "eval_rewards/margins": 0.021220751106739044,
- "eval_rewards/rejected": -0.02374877780675888,
- "eval_runtime": 145.6815,
- "eval_samples": 343,
- "eval_samples_per_second": 2.354,
- "eval_steps_per_second": 0.295,
- "train_loss": 0.6364745784115482,
- "train_runtime": 3251.6479,
+ "train_loss": 0.46439293204963983,
+ "train_runtime": 3253.2697,
  "train_samples": 3079,
- "train_samples_per_second": 0.947,
+ "train_samples_per_second": 0.946,
  "train_steps_per_second": 0.118
  }
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
  "epoch": 1.0,
- "train_loss": 0.6364745784115482,
- "train_runtime": 3251.6479,
+ "train_loss": 0.46439293204963983,
+ "train_runtime": 3253.2697,
  "train_samples": 3079,
+ "train_samples_per_second": 0.946,
- "train_samples_per_second": 0.947,
  "train_steps_per_second": 0.118
  }
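
As a quick, illustrative sanity check on the updated numbers, the throughput fields in train_results.json follow directly from the sample count, step count, and runtime:

```python
# Consistency check of the logged throughput (values copied from the updated files).
train_samples = 3079
train_runtime = 3253.2697   # seconds, from train_results.json
total_steps = 385           # final "step" in trainer_state.json

print(round(train_samples / train_runtime, 3))  # 0.946 -> train_samples_per_second
print(round(total_steps / train_runtime, 3))    # 0.118 -> train_steps_per_second
```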
trainer_state.json CHANGED
@@ -15,7 +15,7 @@
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
- "loss": 0.6931,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
@@ -25,467 +25,467 @@
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
- "logits/chosen": -1.8668782711029053,
29
- "logits/rejected": -1.8712005615234375,
30
- "logps/chosen": -36.98646545410156,
31
- "logps/rejected": -33.67870330810547,
32
- "loss": 0.6747,
33
- "rewards/accuracies": 0.5833333134651184,
34
- "rewards/chosen": 0.012081857770681381,
35
- "rewards/margins": 0.04126200079917908,
36
- "rewards/rejected": -0.029180139303207397,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
- "logits/chosen": -1.9979515075683594,
43
- "logits/rejected": -2.0005881786346436,
44
- "logps/chosen": -29.662744522094727,
45
- "logps/rejected": -29.051654815673828,
46
- "loss": 0.7042,
47
- "rewards/accuracies": 0.4000000059604645,
48
- "rewards/chosen": -0.01232814695686102,
49
- "rewards/margins": -0.019403135403990746,
50
- "rewards/rejected": 0.007074988447129726,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
- "logits/chosen": -1.921088457107544,
57
- "logits/rejected": -1.9184081554412842,
58
- "logps/chosen": -31.383258819580078,
59
- "logps/rejected": -33.23828887939453,
60
- "loss": 0.6794,
61
- "rewards/accuracies": 0.5874999761581421,
62
- "rewards/chosen": 0.019701367244124413,
63
- "rewards/margins": 0.03159697726368904,
64
- "rewards/rejected": -0.011895612813532352,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
- "logits/chosen": -2.017509937286377,
71
- "logits/rejected": -2.0087647438049316,
72
- "logps/chosen": -32.577518463134766,
73
- "logps/rejected": -32.509830474853516,
74
- "loss": 0.6964,
75
- "rewards/accuracies": 0.48750001192092896,
76
- "rewards/chosen": -0.00042539089918136597,
77
- "rewards/margins": -0.0021285698749125004,
78
- "rewards/rejected": 0.001703177229501307,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
- "logits/chosen": -1.8623021841049194,
85
- "logits/rejected": -1.8515303134918213,
86
- "logps/chosen": -33.56303787231445,
87
- "logps/rejected": -35.47795867919922,
88
- "loss": 0.6896,
89
- "rewards/accuracies": 0.5249999761581421,
90
- "rewards/chosen": -0.0033326249103993177,
91
- "rewards/margins": 0.012034483253955841,
92
- "rewards/rejected": -0.015367108397185802,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
- "logits/chosen": -1.9411529302597046,
99
- "logits/rejected": -1.943098783493042,
100
- "logps/chosen": -32.549232482910156,
101
- "logps/rejected": -33.20621109008789,
102
- "loss": 0.6747,
103
- "rewards/accuracies": 0.5249999761581421,
104
- "rewards/chosen": 0.030568569898605347,
105
- "rewards/margins": 0.051466990262269974,
106
- "rewards/rejected": -0.02089841663837433,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
- "logits/chosen": -2.0727076530456543,
113
- "logits/rejected": -2.07767391204834,
114
- "logps/chosen": -33.96394729614258,
115
- "logps/rejected": -36.61058807373047,
116
- "loss": 0.6813,
117
- "rewards/accuracies": 0.5249999761581421,
118
- "rewards/chosen": 0.014724211767315865,
119
- "rewards/margins": 0.03975607082247734,
120
- "rewards/rejected": -0.025031859055161476,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
- "logits/chosen": -1.9330183267593384,
127
- "logits/rejected": -1.936171293258667,
128
- "logps/chosen": -34.318397521972656,
129
- "logps/rejected": -34.61701583862305,
130
- "loss": 0.6634,
131
- "rewards/accuracies": 0.550000011920929,
132
- "rewards/chosen": 0.06564848870038986,
133
- "rewards/margins": 0.08041460067033768,
134
- "rewards/rejected": -0.014766111969947815,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
- "logits/chosen": -1.9412405490875244,
141
- "logits/rejected": -1.945755958557129,
142
- "logps/chosen": -32.374977111816406,
143
- "logps/rejected": -32.33773422241211,
144
- "loss": 0.6787,
145
- "rewards/accuracies": 0.6499999761581421,
146
- "rewards/chosen": 0.05546582490205765,
147
- "rewards/margins": 0.04276125878095627,
148
- "rewards/rejected": 0.012704563327133656,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
- "logits/chosen": -2.0381903648376465,
155
- "logits/rejected": -2.0362119674682617,
156
- "logps/chosen": -32.13701629638672,
157
- "logps/rejected": -31.295801162719727,
158
- "loss": 0.6595,
159
- "rewards/accuracies": 0.6499999761581421,
160
- "rewards/chosen": 0.06229018047451973,
161
- "rewards/margins": 0.08004496991634369,
162
- "rewards/rejected": -0.017754793167114258,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
- "eval_logits/chosen": -2.2335634231567383,
168
- "eval_logits/rejected": -2.228721857070923,
169
- "eval_logps/chosen": -34.00627899169922,
170
- "eval_logps/rejected": -37.514984130859375,
171
- "eval_loss": 0.6900185346603394,
172
- "eval_rewards/accuracies": 0.5253322720527649,
173
- "eval_rewards/chosen": 0.016964510083198547,
174
- "eval_rewards/margins": 0.015982570126652718,
175
- "eval_rewards/rejected": 0.0009819410042837262,
176
- "eval_runtime": 145.7969,
177
- "eval_samples_per_second": 2.353,
178
  "eval_steps_per_second": 0.295,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
- "logits/chosen": -1.9929395914077759,
185
- "logits/rejected": -1.99057936668396,
186
- "logps/chosen": -33.06591796875,
187
- "logps/rejected": -34.01008605957031,
188
- "loss": 0.6678,
189
- "rewards/accuracies": 0.675000011920929,
190
- "rewards/chosen": 0.10673947632312775,
191
- "rewards/margins": 0.09516827017068863,
192
- "rewards/rejected": 0.011571208015084267,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
- "logits/chosen": -2.0045437812805176,
199
- "logits/rejected": -1.9962146282196045,
200
- "logps/chosen": -32.33503341674805,
201
- "logps/rejected": -32.12450408935547,
202
- "loss": 0.68,
203
- "rewards/accuracies": 0.5625,
204
- "rewards/chosen": 0.06592197716236115,
205
- "rewards/margins": 0.043721526861190796,
206
- "rewards/rejected": 0.02220045030117035,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
- "logits/chosen": -2.032952308654785,
213
- "logits/rejected": -2.024994134902954,
214
- "logps/chosen": -30.302433013916016,
215
- "logps/rejected": -32.04313278198242,
216
- "loss": 0.6609,
217
- "rewards/accuracies": 0.612500011920929,
218
- "rewards/chosen": 0.10397056490182877,
219
- "rewards/margins": 0.09804753214120865,
220
- "rewards/rejected": 0.0059230271726846695,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
- "logits/chosen": -1.9635902643203735,
227
- "logits/rejected": -1.9738080501556396,
228
- "logps/chosen": -31.201534271240234,
229
- "logps/rejected": -32.556739807128906,
230
- "loss": 0.6343,
231
- "rewards/accuracies": 0.6875,
232
- "rewards/chosen": 0.13087674975395203,
233
- "rewards/margins": 0.1439923495054245,
234
- "rewards/rejected": -0.013115609996020794,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
- "logits/chosen": -1.8755298852920532,
241
- "logits/rejected": -1.876691222190857,
242
- "logps/chosen": -33.89242172241211,
243
- "logps/rejected": -34.753570556640625,
244
- "loss": 0.6272,
245
  "rewards/accuracies": 0.612500011920929,
246
- "rewards/chosen": 0.1801605522632599,
247
- "rewards/margins": 0.17795029282569885,
248
- "rewards/rejected": 0.002210266888141632,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
- "logits/chosen": -1.9265025854110718,
255
- "logits/rejected": -1.9230976104736328,
256
- "logps/chosen": -35.98413848876953,
257
- "logps/rejected": -32.69154357910156,
258
- "loss": 0.6574,
259
- "rewards/accuracies": 0.574999988079071,
260
- "rewards/chosen": 0.11252401769161224,
261
- "rewards/margins": 0.09379850327968597,
262
- "rewards/rejected": 0.01872551441192627,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
- "logits/chosen": -2.0262577533721924,
269
- "logits/rejected": -2.0189335346221924,
270
- "logps/chosen": -33.45969772338867,
271
- "logps/rejected": -31.372516632080078,
272
- "loss": 0.6076,
273
- "rewards/accuracies": 0.675000011920929,
274
- "rewards/chosen": 0.20289841294288635,
275
- "rewards/margins": 0.21529710292816162,
276
- "rewards/rejected": -0.012398697435855865,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
- "logits/chosen": -2.0338973999023438,
283
- "logits/rejected": -2.0391509532928467,
284
- "logps/chosen": -32.194793701171875,
285
- "logps/rejected": -32.42069625854492,
286
- "loss": 0.6221,
287
- "rewards/accuracies": 0.737500011920929,
288
- "rewards/chosen": 0.20999665558338165,
289
- "rewards/margins": 0.17132976651191711,
290
- "rewards/rejected": 0.03866690397262573,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
- "logits/chosen": -2.035045862197876,
297
- "logits/rejected": -2.032278060913086,
298
- "logps/chosen": -31.230976104736328,
299
- "logps/rejected": -31.29391860961914,
300
- "loss": 0.6403,
301
- "rewards/accuracies": 0.637499988079071,
302
- "rewards/chosen": 0.1547853648662567,
303
- "rewards/margins": 0.14020755887031555,
304
- "rewards/rejected": 0.014577840454876423,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
- "logits/chosen": -1.9042125940322876,
311
- "logits/rejected": -1.9088417291641235,
312
- "logps/chosen": -31.285167694091797,
313
- "logps/rejected": -32.79944610595703,
314
- "loss": 0.6189,
315
- "rewards/accuracies": 0.737500011920929,
316
- "rewards/chosen": 0.192669078707695,
317
- "rewards/margins": 0.1977730393409729,
318
- "rewards/rejected": -0.0051039643585681915,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
- "eval_logits/chosen": -2.230701446533203,
324
- "eval_logits/rejected": -2.2258734703063965,
325
- "eval_logps/chosen": -34.0167350769043,
326
- "eval_logps/rejected": -37.53435134887695,
327
- "eval_loss": 0.6894960403442383,
328
  "eval_rewards/accuracies": 0.5340532064437866,
329
- "eval_rewards/chosen": 0.010691030882298946,
330
- "eval_rewards/margins": 0.02133062295615673,
331
- "eval_rewards/rejected": -0.010639593005180359,
332
- "eval_runtime": 145.5559,
333
- "eval_samples_per_second": 2.356,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
- "logits/chosen": -2.018108367919922,
341
- "logits/rejected": -2.0287561416625977,
342
- "logps/chosen": -31.745798110961914,
343
- "logps/rejected": -33.90629577636719,
344
- "loss": 0.6305,
345
- "rewards/accuracies": 0.675000011920929,
346
- "rewards/chosen": 0.13530384004116058,
347
- "rewards/margins": 0.16146349906921387,
348
- "rewards/rejected": -0.026159662753343582,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
- "logits/chosen": -1.9105422496795654,
355
- "logits/rejected": -1.9253056049346924,
356
- "logps/chosen": -29.849069595336914,
357
- "logps/rejected": -31.58382797241211,
358
- "loss": 0.6251,
359
- "rewards/accuracies": 0.6625000238418579,
360
- "rewards/chosen": 0.15748274326324463,
361
- "rewards/margins": 0.17287404835224152,
362
- "rewards/rejected": -0.015391310676932335,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
- "logits/chosen": -1.966581106185913,
369
- "logits/rejected": -1.9705555438995361,
370
- "logps/chosen": -33.059837341308594,
371
- "logps/rejected": -31.605152130126953,
372
- "loss": 0.6,
373
- "rewards/accuracies": 0.7124999761581421,
374
- "rewards/chosen": 0.21457910537719727,
375
- "rewards/margins": 0.2480895221233368,
376
- "rewards/rejected": -0.033510446548461914,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
- "logits/chosen": -1.9644801616668701,
383
- "logits/rejected": -1.9426641464233398,
384
- "logps/chosen": -33.819881439208984,
385
- "logps/rejected": -35.105430603027344,
386
- "loss": 0.5826,
387
- "rewards/accuracies": 0.7250000238418579,
388
- "rewards/chosen": 0.20031993091106415,
389
- "rewards/margins": 0.2930986285209656,
390
- "rewards/rejected": -0.09277870506048203,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
- "logits/chosen": -2.005814552307129,
397
- "logits/rejected": -2.002516746520996,
398
- "logps/chosen": -32.663055419921875,
399
- "logps/rejected": -36.233436584472656,
400
- "loss": 0.632,
401
- "rewards/accuracies": 0.6625000238418579,
402
- "rewards/chosen": 0.15720662474632263,
403
- "rewards/margins": 0.16231317818164825,
404
- "rewards/rejected": -0.0051065413281321526,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
- "logits/chosen": -1.8735284805297852,
411
- "logits/rejected": -1.8710968494415283,
412
- "logps/chosen": -33.964805603027344,
413
- "logps/rejected": -35.50111389160156,
414
- "loss": 0.6428,
415
  "rewards/accuracies": 0.6499999761581421,
416
- "rewards/chosen": 0.13415411114692688,
417
- "rewards/margins": 0.13204893469810486,
418
- "rewards/rejected": 0.0021051731891930103,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
- "logits/chosen": -1.8585532903671265,
425
- "logits/rejected": -1.8561296463012695,
426
- "logps/chosen": -34.17288589477539,
427
- "logps/rejected": -31.792491912841797,
428
- "loss": 0.6391,
429
- "rewards/accuracies": 0.6875,
430
- "rewards/chosen": 0.13482868671417236,
431
- "rewards/margins": 0.14752644300460815,
432
- "rewards/rejected": -0.012697766534984112,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
- "logits/chosen": -1.9624073505401611,
439
- "logits/rejected": -1.9519150257110596,
440
- "logps/chosen": -34.95975875854492,
441
- "logps/rejected": -31.84967041015625,
442
- "loss": 0.5947,
443
- "rewards/accuracies": 0.7749999761581421,
444
- "rewards/chosen": 0.2381301373243332,
445
- "rewards/margins": 0.2433377206325531,
446
- "rewards/rejected": -0.005207589361816645,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
- "logits/chosen": -2.056440830230713,
453
- "logits/rejected": -2.0415446758270264,
454
- "logps/chosen": -30.704137802124023,
455
- "logps/rejected": -32.5970458984375,
456
- "loss": 0.6654,
457
- "rewards/accuracies": 0.5625,
458
- "rewards/chosen": 0.13198992609977722,
459
- "rewards/margins": 0.09911760687828064,
460
- "rewards/rejected": 0.03287229686975479,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
- "logits/chosen": -1.927122712135315,
467
- "logits/rejected": -1.924602746963501,
468
- "logps/chosen": -32.37379455566406,
469
- "logps/rejected": -30.879268646240234,
470
- "loss": 0.5582,
471
- "rewards/accuracies": 0.7124999761581421,
472
- "rewards/chosen": 0.3319894075393677,
473
- "rewards/margins": 0.3694665729999542,
474
- "rewards/rejected": -0.03747714310884476,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
- "eval_logits/chosen": -2.226900815963745,
480
- "eval_logits/rejected": -2.2220816612243652,
481
- "eval_logps/chosen": -34.03526306152344,
482
- "eval_logps/rejected": -37.56266403198242,
483
- "eval_loss": 0.6872054934501648,
484
- "eval_rewards/accuracies": 0.5598006844520569,
485
- "eval_rewards/chosen": -0.0004257837135810405,
486
- "eval_rewards/margins": 0.027201363816857338,
487
- "eval_rewards/rejected": -0.027627145871520042,
488
- "eval_runtime": 145.6667,
489
  "eval_samples_per_second": 2.355,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
@@ -493,122 +493,122 @@
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
- "logits/chosen": -1.9124987125396729,
497
- "logits/rejected": -1.9092620611190796,
498
- "logps/chosen": -31.285348892211914,
499
- "logps/rejected": -33.75069046020508,
500
- "loss": 0.6188,
501
- "rewards/accuracies": 0.675000011920929,
502
- "rewards/chosen": 0.19080111384391785,
503
- "rewards/margins": 0.1959228217601776,
504
- "rewards/rejected": -0.0051217032596468925,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
- "logits/chosen": -1.9616212844848633,
511
- "logits/rejected": -1.9494349956512451,
512
- "logps/chosen": -34.30357360839844,
513
- "logps/rejected": -33.64542770385742,
514
- "loss": 0.5999,
515
- "rewards/accuracies": 0.6875,
516
- "rewards/chosen": 0.17549821734428406,
517
- "rewards/margins": 0.23945149779319763,
518
- "rewards/rejected": -0.06395327299833298,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
- "logits/chosen": -1.997859239578247,
525
- "logits/rejected": -1.9964158535003662,
526
- "logps/chosen": -33.130615234375,
527
- "logps/rejected": -32.511531829833984,
528
- "loss": 0.6073,
529
- "rewards/accuracies": 0.7124999761581421,
530
- "rewards/chosen": 0.21310487389564514,
531
- "rewards/margins": 0.23227325081825256,
532
- "rewards/rejected": -0.01916835829615593,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
- "logits/chosen": -2.083962917327881,
539
- "logits/rejected": -2.06827449798584,
540
- "logps/chosen": -33.732425689697266,
541
- "logps/rejected": -33.07551193237305,
542
- "loss": 0.5985,
543
- "rewards/accuracies": 0.762499988079071,
544
- "rewards/chosen": 0.2681804299354553,
545
- "rewards/margins": 0.24266552925109863,
546
- "rewards/rejected": 0.025514895096421242,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
- "logits/chosen": -1.956364631652832,
553
- "logits/rejected": -1.9555118083953857,
554
- "logps/chosen": -32.79610824584961,
555
- "logps/rejected": -32.512969970703125,
556
- "loss": 0.5758,
557
- "rewards/accuracies": 0.7250000238418579,
558
- "rewards/chosen": 0.2915918231010437,
559
- "rewards/margins": 0.3202829957008362,
560
- "rewards/rejected": -0.028691178187727928,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
- "logits/chosen": -1.9118818044662476,
567
- "logits/rejected": -1.9221813678741455,
568
- "logps/chosen": -31.855304718017578,
569
- "logps/rejected": -35.32331848144531,
570
- "loss": 0.6036,
571
- "rewards/accuracies": 0.7250000238418579,
572
- "rewards/chosen": 0.2096320390701294,
573
- "rewards/margins": 0.23036828637123108,
574
- "rewards/rejected": -0.02073623239994049,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
- "logits/chosen": -2.0520732402801514,
581
- "logits/rejected": -2.0456154346466064,
582
- "logps/chosen": -33.327049255371094,
583
- "logps/rejected": -29.233760833740234,
584
- "loss": 0.6124,
585
- "rewards/accuracies": 0.7124999761581421,
586
- "rewards/chosen": 0.19495204091072083,
587
- "rewards/margins": 0.19935496151447296,
588
- "rewards/rejected": -0.004402949940413237,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
- "logits/chosen": -1.911370038986206,
595
- "logits/rejected": -1.9135433435440063,
596
- "logps/chosen": -33.83781814575195,
597
- "logps/rejected": -30.931133270263672,
598
- "loss": 0.5854,
599
  "rewards/accuracies": 0.762499988079071,
600
- "rewards/chosen": 0.2407412976026535,
601
- "rewards/margins": 0.27584755420684814,
602
- "rewards/rejected": -0.035106249153614044,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
- "train_loss": 0.6364745784115482,
610
- "train_runtime": 3251.6479,
611
- "train_samples_per_second": 0.947,
612
  "train_steps_per_second": 0.118
613
  }
614
  ],
 
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
+ "loss": 0.5,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
 
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
+ "logits/chosen": -1.866639494895935,
29
+ "logits/rejected": -1.870951533317566,
30
+ "logps/chosen": -36.991004943847656,
31
+ "logps/rejected": -33.64551544189453,
32
+ "loss": 0.4968,
33
+ "rewards/accuracies": 0.5694444179534912,
34
+ "rewards/chosen": 0.009355764836072922,
35
+ "rewards/margins": 0.018620232120156288,
36
+ "rewards/rejected": -0.009264465421438217,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
+ "logits/chosen": -1.9975944757461548,
43
+ "logits/rejected": -2.000239133834839,
44
+ "logps/chosen": -29.655420303344727,
45
+ "logps/rejected": -29.04937171936035,
46
+ "loss": 0.5034,
47
+ "rewards/accuracies": 0.44999998807907104,
48
+ "rewards/chosen": -0.007936288602650166,
49
+ "rewards/margins": -0.016377601772546768,
50
+ "rewards/rejected": 0.008441311307251453,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
+ "logits/chosen": -1.920345664024353,
57
+ "logits/rejected": -1.917649507522583,
58
+ "logps/chosen": -31.40158462524414,
59
+ "logps/rejected": -33.22117614746094,
60
+ "loss": 0.499,
61
+ "rewards/accuracies": 0.550000011920929,
62
+ "rewards/chosen": 0.008706120774149895,
63
+ "rewards/margins": 0.010335424914956093,
64
+ "rewards/rejected": -0.0016293060034513474,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
+ "logits/chosen": -2.016953706741333,
71
+ "logits/rejected": -2.008241891860962,
72
+ "logps/chosen": -32.55894088745117,
73
+ "logps/rejected": -32.48887634277344,
74
+ "loss": 0.501,
75
+ "rewards/accuracies": 0.550000011920929,
76
+ "rewards/chosen": 0.010718915611505508,
77
+ "rewards/margins": -0.0035588580649346113,
78
+ "rewards/rejected": 0.014277773909270763,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
+ "logits/chosen": -1.8621622323989868,
85
+ "logits/rejected": -1.851413369178772,
86
+ "logps/chosen": -33.5220832824707,
87
+ "logps/rejected": -35.4012565612793,
88
+ "loss": 0.5034,
89
+ "rewards/accuracies": 0.48750001192092896,
90
+ "rewards/chosen": 0.021240826696157455,
91
+ "rewards/margins": -0.009411826729774475,
92
+ "rewards/rejected": 0.03065265715122223,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
+ "logits/chosen": -1.9412288665771484,
99
+ "logits/rejected": -1.9431712627410889,
100
+ "logps/chosen": -32.506317138671875,
101
+ "logps/rejected": -33.202842712402344,
102
+ "loss": 0.4846,
103
+ "rewards/accuracies": 0.699999988079071,
104
+ "rewards/chosen": 0.05631808191537857,
105
+ "rewards/margins": 0.07519452273845673,
106
+ "rewards/rejected": -0.018876442685723305,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
+ "logits/chosen": -2.0726475715637207,
113
+ "logits/rejected": -2.077623128890991,
114
+ "logps/chosen": -33.90425491333008,
115
+ "logps/rejected": -36.56460189819336,
116
+ "loss": 0.4903,
117
+ "rewards/accuracies": 0.574999988079071,
118
+ "rewards/chosen": 0.05054063722491264,
119
+ "rewards/margins": 0.04797897860407829,
120
+ "rewards/rejected": 0.002561659086495638,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
+ "logits/chosen": -1.9338359832763672,
127
+ "logits/rejected": -1.9369510412216187,
128
+ "logps/chosen": -34.21551513671875,
129
+ "logps/rejected": -34.527488708496094,
130
+ "loss": 0.4797,
131
+ "rewards/accuracies": 0.5874999761581421,
132
+ "rewards/chosen": 0.1273767203092575,
133
+ "rewards/margins": 0.08842838555574417,
134
+ "rewards/rejected": 0.038948334753513336,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
+ "logits/chosen": -1.9420888423919678,
141
+ "logits/rejected": -1.9465935230255127,
142
+ "logps/chosen": -32.2774772644043,
143
+ "logps/rejected": -32.245147705078125,
144
+ "loss": 0.4887,
145
+ "rewards/accuracies": 0.625,
146
+ "rewards/chosen": 0.11396588385105133,
147
+ "rewards/margins": 0.04570900648832321,
148
+ "rewards/rejected": 0.06825686991214752,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
+ "logits/chosen": -2.040524482727051,
155
+ "logits/rejected": -2.0385289192199707,
156
+ "logps/chosen": -32.03118133544922,
157
+ "logps/rejected": -31.205692291259766,
158
+ "loss": 0.4799,
159
+ "rewards/accuracies": 0.6875,
160
+ "rewards/chosen": 0.12579473853111267,
161
+ "rewards/margins": 0.08948288857936859,
162
+ "rewards/rejected": 0.036311857402324677,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
+ "eval_logits/chosen": -2.234907865524292,
168
+ "eval_logits/rejected": -2.2300779819488525,
169
+ "eval_logps/chosen": -33.90293884277344,
170
+ "eval_logps/rejected": -37.394290924072266,
171
+ "eval_loss": 0.4987158477306366,
172
+ "eval_rewards/accuracies": 0.5103820562362671,
173
+ "eval_rewards/chosen": 0.0789671465754509,
174
+ "eval_rewards/margins": 0.005569713190197945,
175
+ "eval_rewards/rejected": 0.0733974277973175,
176
+ "eval_runtime": 145.9867,
177
+ "eval_samples_per_second": 2.35,
178
  "eval_steps_per_second": 0.295,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
+ "logits/chosen": -1.9966247081756592,
185
+ "logits/rejected": -1.9942585229873657,
186
+ "logps/chosen": -33.005943298339844,
187
+ "logps/rejected": -33.908599853515625,
188
+ "loss": 0.4799,
189
+ "rewards/accuracies": 0.6000000238418579,
190
+ "rewards/chosen": 0.14272192120552063,
191
+ "rewards/margins": 0.07025818526744843,
192
+ "rewards/rejected": 0.07246376574039459,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
+ "logits/chosen": -2.0064611434936523,
199
+ "logits/rejected": -1.9981132745742798,
200
+ "logps/chosen": -32.16605758666992,
201
+ "logps/rejected": -31.968135833740234,
202
+ "loss": 0.4883,
203
+ "rewards/accuracies": 0.5,
204
+ "rewards/chosen": 0.16730833053588867,
205
+ "rewards/margins": 0.0512862391769886,
206
+ "rewards/rejected": 0.11602209508419037,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
+ "logits/chosen": -2.0353620052337646,
213
+ "logits/rejected": -2.0273985862731934,
214
+ "logps/chosen": -30.145355224609375,
215
+ "logps/rejected": -31.892078399658203,
216
+ "loss": 0.477,
217
+ "rewards/accuracies": 0.6000000238418579,
218
+ "rewards/chosen": 0.19821622967720032,
219
+ "rewards/margins": 0.10165951400995255,
220
+ "rewards/rejected": 0.09655670821666718,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
+ "logits/chosen": -1.965121865272522,
227
+ "logits/rejected": -1.9753497838974,
228
+ "logps/chosen": -31.059722900390625,
229
+ "logps/rejected": -32.391807556152344,
230
+ "loss": 0.4695,
231
+ "rewards/accuracies": 0.6625000238418579,
232
+ "rewards/chosen": 0.21596479415893555,
233
+ "rewards/margins": 0.13012099266052246,
234
+ "rewards/rejected": 0.08584379404783249,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
+ "logits/chosen": -1.8772761821746826,
241
+ "logits/rejected": -1.8784434795379639,
242
+ "logps/chosen": -33.682029724121094,
243
+ "logps/rejected": -34.56599044799805,
244
+ "loss": 0.4566,
245
  "rewards/accuracies": 0.612500011920929,
246
+ "rewards/chosen": 0.30639737844467163,
247
+ "rewards/margins": 0.19163966178894043,
248
+ "rewards/rejected": 0.11475770175457001,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
+ "logits/chosen": -1.9283088445663452,
255
+ "logits/rejected": -1.9249660968780518,
256
+ "logps/chosen": -35.78284454345703,
257
+ "logps/rejected": -32.46989059448242,
258
+ "loss": 0.4802,
259
+ "rewards/accuracies": 0.6000000238418579,
260
+ "rewards/chosen": 0.23330166935920715,
261
+ "rewards/margins": 0.08158595860004425,
262
+ "rewards/rejected": 0.1517156958580017,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
+ "logits/chosen": -2.0310397148132324,
269
+ "logits/rejected": -2.0237185955047607,
270
+ "logps/chosen": -33.21892547607422,
271
+ "logps/rejected": -31.1767520904541,
272
+ "loss": 0.443,
273
+ "rewards/accuracies": 0.7124999761581421,
274
+ "rewards/chosen": 0.347359299659729,
275
+ "rewards/margins": 0.24229975044727325,
276
+ "rewards/rejected": 0.10505956411361694,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
+ "logits/chosen": -2.037632703781128,
283
+ "logits/rejected": -2.0428080558776855,
284
+ "logps/chosen": -31.97260093688965,
285
+ "logps/rejected": -32.14643096923828,
286
+ "loss": 0.4662,
287
+ "rewards/accuracies": 0.6499999761581421,
288
+ "rewards/chosen": 0.34331104159355164,
289
+ "rewards/margins": 0.1400861144065857,
290
+ "rewards/rejected": 0.20322492718696594,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
+ "logits/chosen": -2.0373456478118896,
297
+ "logits/rejected": -2.0346312522888184,
298
+ "logps/chosen": -31.077922821044922,
299
+ "logps/rejected": -31.089244842529297,
300
+ "loss": 0.4725,
301
+ "rewards/accuracies": 0.6000000238418579,
302
+ "rewards/chosen": 0.24661760032176971,
303
+ "rewards/margins": 0.10923590511083603,
304
+ "rewards/rejected": 0.13738170266151428,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
+ "logits/chosen": -1.9075931310653687,
311
+ "logits/rejected": -1.9122419357299805,
312
+ "logps/chosen": -31.088512420654297,
313
+ "logps/rejected": -32.591243743896484,
314
+ "loss": 0.4548,
315
+ "rewards/accuracies": 0.7250000238418579,
316
+ "rewards/chosen": 0.3106623888015747,
317
+ "rewards/margins": 0.190850168466568,
318
+ "rewards/rejected": 0.11981221288442612,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
+ "eval_logits/chosen": -2.233458995819092,
324
+ "eval_logits/rejected": -2.2286527156829834,
325
+ "eval_logps/chosen": -33.769569396972656,
326
+ "eval_logps/rejected": -37.28458786010742,
327
+ "eval_loss": 0.4955619275569916,
328
  "eval_rewards/accuracies": 0.5340532064437866,
329
+ "eval_rewards/chosen": 0.15899108350276947,
330
+ "eval_rewards/margins": 0.019772635772824287,
331
+ "eval_rewards/rejected": 0.13921843469142914,
332
+ "eval_runtime": 145.6335,
333
+ "eval_samples_per_second": 2.355,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
+ "logits/chosen": -2.020599365234375,
341
+ "logits/rejected": -2.0312187671661377,
342
+ "logps/chosen": -31.51775550842285,
343
+ "logps/rejected": -33.68301773071289,
344
+ "loss": 0.462,
345
+ "rewards/accuracies": 0.637499988079071,
346
+ "rewards/chosen": 0.2721289098262787,
347
+ "rewards/margins": 0.16431835293769836,
348
+ "rewards/rejected": 0.10781057924032211,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
+ "logits/chosen": -1.9131078720092773,
355
+ "logits/rejected": -1.9278091192245483,
356
+ "logps/chosen": -29.563827514648438,
357
+ "logps/rejected": -31.399028778076172,
358
+ "loss": 0.4463,
359
+ "rewards/accuracies": 0.7124999761581421,
360
+ "rewards/chosen": 0.3286263942718506,
361
+ "rewards/margins": 0.23313641548156738,
362
+ "rewards/rejected": 0.09548995643854141,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
+ "logits/chosen": -1.969555139541626,
369
+ "logits/rejected": -1.9735543727874756,
370
+ "logps/chosen": -32.82552719116211,
371
+ "logps/rejected": -31.39508628845215,
372
+ "loss": 0.4396,
373
+ "rewards/accuracies": 0.7250000238418579,
374
+ "rewards/chosen": 0.3551639914512634,
375
+ "rewards/margins": 0.26263612508773804,
376
+ "rewards/rejected": 0.0925278514623642,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
+ "logits/chosen": -1.9689340591430664,
383
+ "logits/rejected": -1.947165846824646,
384
+ "logps/chosen": -33.572689056396484,
385
+ "logps/rejected": -34.84184265136719,
386
+ "loss": 0.4355,
387
+ "rewards/accuracies": 0.6875,
388
+ "rewards/chosen": 0.3486354947090149,
389
+ "rewards/margins": 0.2832663357257843,
390
+ "rewards/rejected": 0.0653691440820694,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
+ "logits/chosen": -2.0102758407592773,
397
+ "logits/rejected": -2.006990909576416,
398
+ "logps/chosen": -32.49989700317383,
399
+ "logps/rejected": -35.99126434326172,
400
+ "loss": 0.472,
401
+ "rewards/accuracies": 0.5625,
402
+ "rewards/chosen": 0.2551037669181824,
403
+ "rewards/margins": 0.11490776389837265,
404
+ "rewards/rejected": 0.1401960253715515,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
+ "logits/chosen": -1.877345323562622,
411
+ "logits/rejected": -1.8749182224273682,
412
+ "logps/chosen": -33.7043571472168,
413
+ "logps/rejected": -35.259422302246094,
414
+ "loss": 0.4671,
415
  "rewards/accuracies": 0.6499999761581421,
416
+ "rewards/chosen": 0.2904246747493744,
417
+ "rewards/margins": 0.14330394566059113,
418
+ "rewards/rejected": 0.14712072908878326,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
+ "logits/chosen": -1.8626378774642944,
425
+ "logits/rejected": -1.8601328134536743,
426
+ "logps/chosen": -33.90363311767578,
427
+ "logps/rejected": -31.588598251342773,
428
+ "loss": 0.457,
429
+ "rewards/accuracies": 0.612500011920929,
430
+ "rewards/chosen": 0.29638105630874634,
431
+ "rewards/margins": 0.1867421716451645,
432
+ "rewards/rejected": 0.10963888466358185,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
+ "logits/chosen": -1.965528130531311,
439
+ "logits/rejected": -1.9551260471343994,
440
+ "logps/chosen": -34.74420166015625,
441
+ "logps/rejected": -31.622207641601562,
442
+ "loss": 0.4442,
443
+ "rewards/accuracies": 0.6499999761581421,
444
+ "rewards/chosen": 0.3674633204936981,
445
+ "rewards/margins": 0.23619195818901062,
446
+ "rewards/rejected": 0.1312713772058487,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
+ "logits/chosen": -2.0614726543426514,
453
+ "logits/rejected": -2.0466229915618896,
454
+ "logps/chosen": -30.37356948852539,
455
+ "logps/rejected": -32.3351936340332,
456
+ "loss": 0.4671,
457
+ "rewards/accuracies": 0.6000000238418579,
458
+ "rewards/chosen": 0.33033472299575806,
459
+ "rewards/margins": 0.14034779369831085,
460
+ "rewards/rejected": 0.1899869292974472,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
+ "logits/chosen": -1.9327377080917358,
467
+ "logits/rejected": -1.9302759170532227,
468
+ "logps/chosen": -32.0928955078125,
469
+ "logps/rejected": -30.652236938476562,
470
+ "loss": 0.41,
471
+ "rewards/accuracies": 0.737500011920929,
472
+ "rewards/chosen": 0.5005279779434204,
473
+ "rewards/margins": 0.4017825126647949,
474
+ "rewards/rejected": 0.09874548017978668,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
+ "eval_logits/chosen": -2.2308623790740967,
480
+ "eval_logits/rejected": -2.226059913635254,
481
+ "eval_logps/chosen": -33.76142501831055,
482
+ "eval_logps/rejected": -37.284828186035156,
483
+ "eval_loss": 0.4937135577201843,
484
+ "eval_rewards/accuracies": 0.5361295938491821,
485
+ "eval_rewards/chosen": 0.16387464106082916,
486
+ "eval_rewards/margins": 0.02479831501841545,
487
+ "eval_rewards/rejected": 0.1390763521194458,
488
+ "eval_runtime": 145.6766,
489
  "eval_samples_per_second": 2.355,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
 
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
+ "logits/chosen": -1.916757345199585,
497
+ "logits/rejected": -1.9136326313018799,
498
+ "logps/chosen": -31.0240421295166,
499
+ "logps/rejected": -33.553138732910156,
500
+ "loss": 0.4465,
501
+ "rewards/accuracies": 0.6875,
502
+ "rewards/chosen": 0.3475847542285919,
503
+ "rewards/margins": 0.23417536914348602,
504
+ "rewards/rejected": 0.1134093850851059,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
+ "logits/chosen": -1.9669981002807617,
511
+ "logits/rejected": -1.9548496007919312,
512
+ "logps/chosen": -34.02072525024414,
513
+ "logps/rejected": -33.40859603881836,
514
+ "loss": 0.4379,
515
+ "rewards/accuracies": 0.75,
516
+ "rewards/chosen": 0.3452104926109314,
517
+ "rewards/margins": 0.26706355810165405,
518
+ "rewards/rejected": 0.07814691960811615,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
+ "logits/chosen": -2.0023112297058105,
525
+ "logits/rejected": -2.0009615421295166,
526
+ "logps/chosen": -32.86361312866211,
527
+ "logps/rejected": -32.265541076660156,
528
+ "loss": 0.4439,
529
+ "rewards/accuracies": 0.699999988079071,
530
+ "rewards/chosen": 0.37330904603004456,
531
+ "rewards/margins": 0.24488253891468048,
532
+ "rewards/rejected": 0.12842652201652527,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
+ "logits/chosen": -2.0890090465545654,
539
+ "logits/rejected": -2.073373317718506,
540
+ "logps/chosen": -33.47917556762695,
541
+ "logps/rejected": -32.80946350097656,
542
+ "loss": 0.4446,
543
+ "rewards/accuracies": 0.675000011920929,
544
+ "rewards/chosen": 0.42013058066368103,
545
+ "rewards/margins": 0.23498812317848206,
546
+ "rewards/rejected": 0.18514248728752136,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
+ "logits/chosen": -1.9615952968597412,
553
+ "logits/rejected": -1.9607601165771484,
554
+ "logps/chosen": -32.57384490966797,
555
+ "logps/rejected": -32.25366973876953,
556
+ "loss": 0.4326,
557
+ "rewards/accuracies": 0.6875,
558
+ "rewards/chosen": 0.4249538779258728,
559
+ "rewards/margins": 0.2980697453022003,
560
+ "rewards/rejected": 0.12688413262367249,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
+ "logits/chosen": -1.9177497625350952,
567
+ "logits/rejected": -1.9280321598052979,
568
+ "logps/chosen": -31.600200653076172,
569
+ "logps/rejected": -35.003135681152344,
570
+ "loss": 0.4555,
571
+ "rewards/accuracies": 0.6499999761581421,
572
+ "rewards/chosen": 0.3626941442489624,
573
+ "rewards/margins": 0.19132137298583984,
574
+ "rewards/rejected": 0.17137278616428375,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
+ "logits/chosen": -2.0559232234954834,
581
+ "logits/rejected": -2.0493996143341064,
582
+ "logps/chosen": -33.01537322998047,
583
+ "logps/rejected": -28.992477416992188,
584
+ "loss": 0.4418,
585
+ "rewards/accuracies": 0.7749999761581421,
586
+ "rewards/chosen": 0.3819587230682373,
587
+ "rewards/margins": 0.2415906935930252,
588
+ "rewards/rejected": 0.14036807417869568,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
+ "logits/chosen": -1.9163579940795898,
595
+ "logits/rejected": -1.9185268878936768,
596
+ "logps/chosen": -33.650962829589844,
597
+ "logps/rejected": -30.753406524658203,
598
+ "loss": 0.4369,
599
  "rewards/accuracies": 0.762499988079071,
600
+ "rewards/chosen": 0.3528522849082947,
601
+ "rewards/margins": 0.28131863474845886,
602
+ "rewards/rejected": 0.07153360545635223,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
+ "train_loss": 0.46439293204963983,
610
+ "train_runtime": 3253.2697,
611
+ "train_samples_per_second": 0.946,
612
  "train_steps_per_second": 0.118
613
  }
614
  ],
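
For context on the rewards/* fields logged throughout trainer_state.json: in TRL's DPO bookkeeping, rewards are beta-scaled log-probability ratios of the policy against the frozen reference model, the margin is their difference, and accuracy is the fraction of pairs with a positive margin. The sketch below shows that standard formulation only; beta and the exact loss variant used for this run are assumptions (the initial logged loss of 0.5 rather than ln 2 ≈ 0.693 suggests a non-default loss_type), so treat it as illustrative rather than this run's configuration.

```python
import torch
import torch.nn.functional as F

def dpo_reward_stats(policy_chosen_logps, policy_rejected_logps,
                     ref_chosen_logps, ref_rejected_logps, beta=0.1):
    # Rewards are beta-scaled log-prob ratios of the policy against the frozen reference.
    chosen_rewards = beta * (policy_chosen_logps - ref_chosen_logps)        # -> rewards/chosen
    rejected_rewards = beta * (policy_rejected_logps - ref_rejected_logps)  # -> rewards/rejected
    margins = chosen_rewards - rejected_rewards                             # -> rewards/margins
    accuracy = (margins > 0).float().mean()                                 # -> rewards/accuracies
    # Classic sigmoid DPO loss; this run's logs suggest a different variant was configured.
    loss = -F.logsigmoid(margins).mean()
    return chosen_rewards.mean(), rejected_rewards.mean(), margins.mean(), accuracy, loss
```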