hugodk-sch committed
Commit f5f3a8e
1 Parent(s): 6a62e52

Model save

Files changed (5)
  1. README.md +13 -16
  2. adapter_model.safetensors +1 -1
  3. all_results.json +2 -15
  4. train_results.json +2 -2
  5. trainer_state.json +377 -377
README.md CHANGED
@@ -1,13 +1,10 @@
1
  ---
2
  library_name: peft
3
  tags:
4
- - alignment-handbook
5
  - trl
6
  - dpo
7
  - generated_from_trainer
8
  base_model: NbAiLab/nb-gpt-j-6B-v2
9
- datasets:
10
- - hugodk-sch/aftonposten_title_prefs
11
  model-index:
12
  - name: aftonposten-6b-align-scan
13
  results: []
@@ -18,17 +15,17 @@ should probably proofread and complete it, then remove this comment. -->
18
 
19
  # aftonposten-6b-align-scan
20
 
21
- This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
22
  It achieves the following results on the evaluation set:
23
- - Loss: 1.0817
24
- - Rewards/chosen: 0.0048
25
- - Rewards/rejected: -0.0009
26
- - Rewards/accuracies: 0.5104
27
- - Rewards/margins: 0.0057
28
- - Logps/rejected: -37.5184
29
- - Logps/chosen: -34.0250
30
- - Logits/rejected: -2.2281
31
- - Logits/chosen: -2.2329
32
 
33
  ## Model description
34
 
@@ -63,9 +60,9 @@ The following hyperparameters were used during training:
63
 
64
  | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
65
  |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
66
- | 0.8581 | 0.26 | 100 | 1.0730 | 0.0088 | 0.0069 | 0.5199 | 0.0019 | -37.5028 | -34.0170 | -2.2326 | -2.2374 |
67
- | 0.899 | 0.52 | 200 | 1.0716 | -0.0015 | -0.0088 | 0.5071 | 0.0072 | -37.5342 | -34.0376 | -2.2306 | -2.2355 |
68
- | 0.6105 | 0.78 | 300 | 1.0315 | 0.0102 | -0.0099 | 0.5602 | 0.0201 | -37.5365 | -34.0141 | -2.2289 | -2.2338 |
69
 
70
 
71
  ### Framework versions
 
1
  ---
2
  library_name: peft
3
  tags:
 
4
  - trl
5
  - dpo
6
  - generated_from_trainer
7
  base_model: NbAiLab/nb-gpt-j-6B-v2
8
  model-index:
9
  - name: aftonposten-6b-align-scan
10
  results: []
 
15
 
16
  # aftonposten-6b-align-scan
17
 
18
+ This model is a fine-tuned version of [NbAiLab/nb-gpt-j-6B-v2](https://huggingface.co/NbAiLab/nb-gpt-j-6B-v2) on an unknown dataset.
19
  It achieves the following results on the evaluation set:
20
+ - Loss: 0.9828
21
+ - Rewards/chosen: -0.0112
22
+ - Rewards/rejected: -0.0284
23
+ - Rewards/accuracies: 0.5395
24
+ - Rewards/margins: 0.0172
25
+ - Logps/rejected: -37.5734
26
+ - Logps/chosen: -34.0569
27
+ - Logits/rejected: -2.2180
28
+ - Logits/chosen: -2.2228
29
 
30
  ## Model description
31
 
 
60
 
61
  | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
62
  |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
63
+ | 0.937 | 0.26 | 100 | 0.9921 | 0.0008 | -0.0073 | 0.5282 | 0.0081 | -37.5312 | -34.0329 | -2.2273 | -2.2322 |
64
+ | 0.8332 | 0.52 | 200 | 0.9839 | -0.0016 | -0.0178 | 0.5307 | 0.0162 | -37.5522 | -34.0377 | -2.2233 | -2.2281 |
65
+ | 0.7284 | 0.78 | 300 | 0.9828 | -0.0112 | -0.0284 | 0.5395 | 0.0172 | -37.5734 | -34.0569 | -2.2180 | -2.2228 |
66
 
67
 
68
  ### Framework versions
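
The updated card describes a PEFT adapter trained with TRL's DPO trainer on top of `NbAiLab/nb-gpt-j-6B-v2` (the adapter weights are the `adapter_model.safetensors` file changed below). A minimal sketch of loading the adapter for inference, assuming the adapter repo id is `hugodk-sch/aftonposten-6b-align-scan` (inferred from the committer and model name, not stated in the card):

```python
# Sketch only: load the base model, then apply the DPO-trained PEFT adapter.
# The adapter repo id below is an assumption; adjust it to the actual repo.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "NbAiLab/nb-gpt-j-6B-v2"                    # base_model from the card
adapter_id = "hugodk-sch/aftonposten-6b-align-scan"   # assumed adapter repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.float16)
model = PeftModel.from_pretrained(base, adapter_id)   # loads adapter_model.safetensors
model.eval()

inputs = tokenizer("Skriv en tittel:", return_tensors="pt")
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```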
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:8417328de29b6b5905745ef824b91a690031b7aaa0073a0a0ba98affc3e9bbbf
3
  size 176183216
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e9c83cfb196d121fd47bbe89f821e8f2c260e9d270801c4b868c49d6d3ab09c
3
  size 176183216
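
The adapter weights are tracked with Git LFS, so the diff only swaps the pointer's `oid sha256` while the size stays at 176183216 bytes. A small sketch of checking a locally downloaded copy against the new pointer fields (file path assumed):

```python
# Sketch: verify a downloaded adapter_model.safetensors against the git-lfs
# pointer shown above (expected sha256 oid and byte size).
import hashlib
import os

EXPECTED_OID = "0e9c83cfb196d121fd47bbe89f821e8f2c260e9d270801c4b868c49d6d3ab09c"
EXPECTED_SIZE = 176183216
path = "adapter_model.safetensors"  # assumed local path

assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
assert sha.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("local file matches the LFS pointer")
```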
all_results.json CHANGED
@@ -1,20 +1,7 @@
1
  {
2
  "epoch": 1.0,
3
- "eval_logits/chosen": -2.232933521270752,
4
- "eval_logits/rejected": -2.2280824184417725,
5
- "eval_logps/chosen": -34.02503967285156,
6
- "eval_logps/rejected": -37.51841735839844,
7
- "eval_loss": 1.0816813707351685,
8
- "eval_rewards/accuracies": 0.5103820562362671,
9
- "eval_rewards/chosen": 0.004756772890686989,
10
- "eval_rewards/margins": 0.005652438849210739,
11
- "eval_rewards/rejected": -0.0008956658421084285,
12
- "eval_runtime": 145.5936,
13
- "eval_samples": 343,
14
- "eval_samples_per_second": 2.356,
15
- "eval_steps_per_second": 0.295,
16
- "train_loss": 0.8446982934877469,
17
- "train_runtime": 3252.3399,
18
  "train_samples": 3079,
19
  "train_samples_per_second": 0.947,
20
  "train_steps_per_second": 0.118
 
1
  {
2
  "epoch": 1.0,
3
+ "train_loss": 0.8806782920639237,
4
+ "train_runtime": 3251.4322,
5
  "train_samples": 3079,
6
  "train_samples_per_second": 0.947,
7
  "train_steps_per_second": 0.118
train_results.json CHANGED
@@ -1,7 +1,7 @@
1
  {
2
  "epoch": 1.0,
3
- "train_loss": 0.8446982934877469,
4
- "train_runtime": 3252.3399,
5
  "train_samples": 3079,
6
  "train_samples_per_second": 0.947,
7
  "train_steps_per_second": 0.118
 
1
  {
2
  "epoch": 1.0,
3
+ "train_loss": 0.8806782920639237,
4
+ "train_runtime": 3251.4322,
5
  "train_samples": 3079,
6
  "train_samples_per_second": 0.947,
7
  "train_steps_per_second": 0.118
trainer_state.json CHANGED
@@ -25,589 +25,589 @@
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
- "logits/chosen": -1.866492748260498,
29
- "logits/rejected": -1.87080979347229,
30
- "logps/chosen": -36.97657775878906,
31
- "logps/rejected": -33.65824890136719,
32
- "loss": 0.9236,
33
- "rewards/accuracies": 0.5277777910232544,
34
- "rewards/chosen": 0.015009618364274502,
35
- "rewards/margins": 0.02909613959491253,
36
- "rewards/rejected": -0.014086521230638027,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
- "logits/chosen": -1.9977840185165405,
43
- "logits/rejected": -2.000425100326538,
44
- "logps/chosen": -29.640512466430664,
45
- "logps/rejected": -29.048751831054688,
46
- "loss": 1.0528,
47
- "rewards/accuracies": 0.4375,
48
- "rewards/chosen": 0.0008407801506109536,
49
- "rewards/margins": -0.0065057664178311825,
50
- "rewards/rejected": 0.007346546743065119,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
- "logits/chosen": -1.9210313558578491,
57
- "logits/rejected": -1.9183330535888672,
58
- "logps/chosen": -31.377187728881836,
59
- "logps/rejected": -33.214942932128906,
60
- "loss": 0.976,
61
- "rewards/accuracies": 0.550000011920929,
62
- "rewards/chosen": 0.019451653584837914,
63
- "rewards/margins": 0.01769269071519375,
64
- "rewards/rejected": 0.001758962869644165,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
- "logits/chosen": -2.0173258781433105,
71
- "logits/rejected": -2.008592128753662,
72
- "logps/chosen": -32.55642318725586,
73
- "logps/rejected": -32.49436569213867,
74
- "loss": 1.0336,
75
- "rewards/accuracies": 0.4749999940395355,
76
- "rewards/chosen": 0.010191624984145164,
77
- "rewards/margins": 0.001039800001308322,
78
- "rewards/rejected": 0.009151825681328773,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
- "logits/chosen": -1.8626506328582764,
85
- "logits/rejected": -1.8518873453140259,
86
- "logps/chosen": -33.54867172241211,
87
- "logps/rejected": -35.45621109008789,
88
- "loss": 1.0318,
89
- "rewards/accuracies": 0.5,
90
- "rewards/chosen": 0.004401391837745905,
91
- "rewards/margins": 0.006334079895168543,
92
- "rewards/rejected": -0.001932688057422638,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
- "logits/chosen": -1.940718650817871,
99
- "logits/rejected": -1.9426720142364502,
100
- "logps/chosen": -32.53395462036133,
101
- "logps/rejected": -33.20496368408203,
102
- "loss": 0.9445,
103
- "rewards/accuracies": 0.6499999761581421,
104
- "rewards/chosen": 0.03311632573604584,
105
- "rewards/margins": 0.04990752786397934,
106
- "rewards/rejected": -0.016791202127933502,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
- "logits/chosen": -2.0724740028381348,
113
- "logits/rejected": -2.077458381652832,
114
- "logps/chosen": -33.9911994934082,
115
- "logps/rejected": -36.61388397216797,
116
- "loss": 1.1022,
117
- "rewards/accuracies": 0.5249999761581421,
118
- "rewards/chosen": -0.001358801149763167,
119
- "rewards/margins": 0.021143654361367226,
120
- "rewards/rejected": -0.022502455860376358,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
- "logits/chosen": -1.936197280883789,
127
- "logits/rejected": -1.9393237829208374,
128
- "logps/chosen": -34.28167724609375,
129
- "logps/rejected": -34.63819885253906,
130
- "loss": 0.8186,
131
- "rewards/accuracies": 0.699999988079071,
132
- "rewards/chosen": 0.07306724786758423,
133
- "rewards/margins": 0.09596274793148041,
134
- "rewards/rejected": -0.02289549633860588,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
- "logits/chosen": -1.9451515674591064,
141
- "logits/rejected": -1.949669599533081,
142
- "logps/chosen": -32.39059066772461,
143
- "logps/rejected": -32.346839904785156,
144
- "loss": 1.0017,
145
- "rewards/accuracies": 0.637499988079071,
146
- "rewards/chosen": 0.03841521218419075,
147
- "rewards/margins": 0.03238191828131676,
148
- "rewards/rejected": 0.006033292505890131,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
- "logits/chosen": -2.042168617248535,
155
- "logits/rejected": -2.040160655975342,
156
- "logps/chosen": -32.12788772583008,
157
- "logps/rejected": -31.280298233032227,
158
- "loss": 0.8581,
159
- "rewards/accuracies": 0.612500011920929,
160
- "rewards/chosen": 0.05647433549165726,
161
- "rewards/margins": 0.06351961940526962,
162
- "rewards/rejected": -0.007045289967209101,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
- "eval_logits/chosen": -2.237440824508667,
168
- "eval_logits/rejected": -2.232595682144165,
169
- "eval_logps/chosen": -34.017024993896484,
170
- "eval_logps/rejected": -37.50282287597656,
171
- "eval_loss": 1.0730067491531372,
172
- "eval_rewards/accuracies": 0.5199335813522339,
173
- "eval_rewards/chosen": 0.00876238290220499,
174
- "eval_rewards/margins": 0.001864485559053719,
175
- "eval_rewards/rejected": 0.0068978965282440186,
176
- "eval_runtime": 146.0042,
177
- "eval_samples_per_second": 2.349,
178
  "eval_steps_per_second": 0.295,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
- "logits/chosen": -1.9984264373779297,
185
- "logits/rejected": -1.9960410594940186,
186
- "logps/chosen": -33.10862350463867,
187
- "logps/rejected": -34.00126266479492,
188
- "loss": 1.1665,
189
- "rewards/accuracies": 0.5874999761581421,
190
- "rewards/chosen": 0.0675957053899765,
191
- "rewards/margins": 0.05354113504290581,
192
- "rewards/rejected": 0.014054578728973866,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
- "logits/chosen": -2.0099892616271973,
199
- "logits/rejected": -2.001642942428589,
200
- "logps/chosen": -32.312686920166016,
201
- "logps/rejected": -32.10304641723633,
202
- "loss": 0.9851,
203
- "rewards/accuracies": 0.5375000238418579,
204
- "rewards/chosen": 0.06610965728759766,
205
- "rewards/margins": 0.03688037022948265,
206
- "rewards/rejected": 0.029229288920760155,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
- "logits/chosen": -2.0365984439849854,
213
- "logits/rejected": -2.0286123752593994,
214
- "logps/chosen": -30.3278865814209,
215
- "logps/rejected": -32.04685592651367,
216
- "loss": 0.9819,
217
- "rewards/accuracies": 0.625,
218
- "rewards/chosen": 0.07391555607318878,
219
- "rewards/margins": 0.07084138691425323,
220
- "rewards/rejected": 0.0030741647351533175,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
- "logits/chosen": -1.9668314456939697,
227
- "logits/rejected": -1.9770755767822266,
228
- "logps/chosen": -31.2120304107666,
229
- "logps/rejected": -32.57902526855469,
230
- "loss": 0.8178,
231
- "rewards/accuracies": 0.7250000238418579,
232
- "rewards/chosen": 0.10381648689508438,
233
- "rewards/margins": 0.12588787078857422,
234
- "rewards/rejected": -0.02207140065729618,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
- "logits/chosen": -1.8799388408660889,
241
- "logits/rejected": -1.881087064743042,
242
- "logps/chosen": -33.97100830078125,
243
- "logps/rejected": -34.84876251220703,
244
- "loss": 0.8351,
245
- "rewards/accuracies": 0.6625000238418579,
246
- "rewards/chosen": 0.11084038019180298,
247
- "rewards/margins": 0.15659382939338684,
248
- "rewards/rejected": -0.04575346037745476,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
- "logits/chosen": -1.9326753616333008,
255
- "logits/rejected": -1.9292488098144531,
256
- "logps/chosen": -36.040306091308594,
257
- "logps/rejected": -32.739051818847656,
258
- "loss": 0.8539,
259
- "rewards/accuracies": 0.612500011920929,
260
- "rewards/chosen": 0.0656842365860939,
261
- "rewards/margins": 0.07383431494235992,
262
- "rewards/rejected": -0.008150083012878895,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
- "logits/chosen": -2.0338892936706543,
269
- "logits/rejected": -2.026510238647461,
270
- "logps/chosen": -33.518821716308594,
271
- "logps/rejected": -31.37355613708496,
272
- "loss": 0.7141,
273
- "rewards/accuracies": 0.7250000238418579,
274
- "rewards/chosen": 0.13951851427555084,
275
- "rewards/margins": 0.15037165582180023,
276
- "rewards/rejected": -0.010853144340217113,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
- "logits/chosen": -2.039998769760132,
283
- "logits/rejected": -2.045238733291626,
284
- "logps/chosen": -32.28400421142578,
285
- "logps/rejected": -32.450523376464844,
286
- "loss": 0.7652,
287
- "rewards/accuracies": 0.675000011920929,
288
- "rewards/chosen": 0.13039177656173706,
289
- "rewards/margins": 0.11308407783508301,
290
- "rewards/rejected": 0.017307698726654053,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
- "logits/chosen": -2.0408711433410645,
297
- "logits/rejected": -2.0380892753601074,
298
- "logps/chosen": -31.287479400634766,
299
- "logps/rejected": -31.33124351501465,
300
- "loss": 0.8251,
301
- "rewards/accuracies": 0.699999988079071,
302
- "rewards/chosen": 0.10073776543140411,
303
- "rewards/margins": 0.10725078731775284,
304
- "rewards/rejected": -0.0065130265429615974,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
- "logits/chosen": -1.9110311269760132,
311
- "logits/rejected": -1.915704369544983,
312
- "logps/chosen": -31.336145401000977,
313
- "logps/rejected": -32.791221618652344,
314
- "loss": 0.899,
315
- "rewards/accuracies": 0.6625000238418579,
316
- "rewards/chosen": 0.13506832718849182,
317
- "rewards/margins": 0.13521215319633484,
318
- "rewards/rejected": -0.0001438349427189678,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
- "eval_logits/chosen": -2.2354750633239746,
324
- "eval_logits/rejected": -2.2306265830993652,
325
- "eval_logps/chosen": -34.03763198852539,
326
- "eval_logps/rejected": -37.534156799316406,
327
- "eval_loss": 1.0715795755386353,
328
- "eval_rewards/accuracies": 0.5070598125457764,
329
- "eval_rewards/chosen": -0.0015398082323372364,
330
- "eval_rewards/margins": 0.007228231523185968,
331
- "eval_rewards/rejected": -0.008768039755523205,
332
- "eval_runtime": 145.8306,
333
- "eval_samples_per_second": 2.352,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
- "logits/chosen": -2.0229578018188477,
341
- "logits/rejected": -2.033618450164795,
342
- "logps/chosen": -31.776050567626953,
343
- "logps/rejected": -33.90400314331055,
344
- "loss": 0.7465,
345
- "rewards/accuracies": 0.7250000238418579,
346
- "rewards/chosen": 0.09762789309024811,
347
- "rewards/margins": 0.11827856302261353,
348
- "rewards/rejected": -0.020650675520300865,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
- "logits/chosen": -1.9135267734527588,
355
- "logits/rejected": -1.9282987117767334,
356
- "logps/chosen": -29.896175384521484,
357
- "logps/rejected": -31.5633487701416,
358
- "loss": 0.75,
359
- "rewards/accuracies": 0.675000011920929,
360
- "rewards/chosen": 0.10768184810876846,
361
- "rewards/margins": 0.11026783287525177,
362
- "rewards/rejected": -0.002585983369499445,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
- "logits/chosen": -1.9711973667144775,
369
- "logits/rejected": -1.9751732349395752,
370
- "logps/chosen": -33.15174102783203,
371
- "logps/rejected": -31.605077743530273,
372
- "loss": 0.7191,
373
- "rewards/accuracies": 0.7250000238418579,
374
- "rewards/chosen": 0.1328631192445755,
375
- "rewards/margins": 0.1607515811920166,
376
- "rewards/rejected": -0.027888456359505653,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
- "logits/chosen": -1.969957709312439,
383
- "logits/rejected": -1.9480478763580322,
384
- "logps/chosen": -33.9122200012207,
385
- "logps/rejected": -35.02121353149414,
386
- "loss": 0.6955,
387
- "rewards/accuracies": 0.75,
388
- "rewards/chosen": 0.12076646089553833,
389
- "rewards/margins": 0.15597540140151978,
390
- "rewards/rejected": -0.03520893678069115,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
- "logits/chosen": -2.0103070735931396,
397
- "logits/rejected": -2.006990909576416,
398
- "logps/chosen": -32.72673797607422,
399
- "logps/rejected": -36.23841094970703,
400
- "loss": 0.8178,
401
- "rewards/accuracies": 0.7250000238418579,
402
- "rewards/chosen": 0.09916610270738602,
403
- "rewards/margins": 0.10590960830450058,
404
- "rewards/rejected": -0.006743511650711298,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
- "logits/chosen": -1.8776795864105225,
411
- "logits/rejected": -1.875245451927185,
412
- "logps/chosen": -34.003971099853516,
413
- "logps/rejected": -35.510765075683594,
414
- "loss": 0.889,
415
- "rewards/accuracies": 0.6625000238418579,
416
- "rewards/chosen": 0.09221391379833221,
417
- "rewards/margins": 0.09528535604476929,
418
- "rewards/rejected": -0.0030714483000338078,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
- "logits/chosen": -1.863521933555603,
425
- "logits/rejected": -1.8610206842422485,
426
- "logps/chosen": -34.20132827758789,
427
- "logps/rejected": -31.76943016052246,
428
- "loss": 0.8268,
429
- "rewards/accuracies": 0.7124999761581421,
430
- "rewards/chosen": 0.09813599288463593,
431
- "rewards/margins": 0.09718601405620575,
432
- "rewards/rejected": 0.000949984765611589,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
- "logits/chosen": -1.9676616191864014,
439
- "logits/rejected": -1.957082748413086,
440
- "logps/chosen": -35.020606994628906,
441
- "logps/rejected": -31.831247329711914,
442
- "loss": 0.6669,
443
- "rewards/accuracies": 0.762499988079071,
444
- "rewards/chosen": 0.168013796210289,
445
- "rewards/margins": 0.16314153373241425,
446
- "rewards/rejected": 0.00487226527184248,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
- "logits/chosen": -2.0636610984802246,
453
- "logits/rejected": -2.0486764907836914,
454
- "logps/chosen": -30.683029174804688,
455
- "logps/rejected": -32.61827850341797,
456
- "loss": 0.894,
457
- "rewards/accuracies": 0.6625000238418579,
458
- "rewards/chosen": 0.12054909765720367,
459
- "rewards/margins": 0.10377003997564316,
460
- "rewards/rejected": 0.016779040917754173,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
- "logits/chosen": -1.9357779026031494,
467
- "logits/rejected": -1.9332023859024048,
468
- "logps/chosen": -32.54056930541992,
469
- "logps/rejected": -30.850332260131836,
470
- "loss": 0.6105,
471
- "rewards/accuracies": 0.737500011920929,
472
- "rewards/chosen": 0.1932690441608429,
473
- "rewards/margins": 0.21003055572509766,
474
- "rewards/rejected": -0.01676151715219021,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
- "eval_logits/chosen": -2.233783006668091,
480
- "eval_logits/rejected": -2.228926658630371,
481
- "eval_logps/chosen": -34.01414108276367,
482
- "eval_logps/rejected": -37.53648376464844,
483
- "eval_loss": 1.0314662456512451,
484
- "eval_rewards/accuracies": 0.560215950012207,
485
- "eval_rewards/chosen": 0.010205330327153206,
486
- "eval_rewards/margins": 0.020134516060352325,
487
- "eval_rewards/rejected": -0.009929186664521694,
488
- "eval_runtime": 145.6316,
489
- "eval_samples_per_second": 2.355,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
- "logits/chosen": -1.9184128046035767,
497
- "logits/rejected": -1.9151279926300049,
498
- "logps/chosen": -31.325061798095703,
499
- "logps/rejected": -33.77220916748047,
500
- "loss": 0.7283,
501
- "rewards/accuracies": 0.75,
502
- "rewards/chosen": 0.13914386928081512,
503
- "rewards/margins": 0.15417365729808807,
504
- "rewards/rejected": -0.01502978801727295,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
- "logits/chosen": -1.968726396560669,
511
- "logits/rejected": -1.9564485549926758,
512
- "logps/chosen": -34.366207122802734,
513
- "logps/rejected": -33.61689376831055,
514
- "loss": 0.6804,
515
- "rewards/accuracies": 0.699999988079071,
516
- "rewards/chosen": 0.11493507772684097,
517
- "rewards/margins": 0.1539594680070877,
518
- "rewards/rejected": -0.03902440145611763,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
- "logits/chosen": -2.0041136741638184,
525
- "logits/rejected": -2.002657651901245,
526
- "logps/chosen": -33.25464630126953,
527
- "logps/rejected": -32.49077606201172,
528
- "loss": 0.7724,
529
- "rewards/accuracies": 0.75,
530
- "rewards/chosen": 0.11557211726903915,
531
- "rewards/margins": 0.12116815894842148,
532
- "rewards/rejected": -0.005596047732979059,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
- "logits/chosen": -2.0917208194732666,
539
- "logits/rejected": -2.0759525299072266,
540
- "logps/chosen": -33.83209991455078,
541
- "logps/rejected": -33.08992004394531,
542
- "loss": 0.7606,
543
- "rewards/accuracies": 0.75,
544
- "rewards/chosen": 0.17364642024040222,
545
- "rewards/margins": 0.15958845615386963,
546
- "rewards/rejected": 0.014057991094887257,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
- "logits/chosen": -1.9635206460952759,
553
- "logits/rejected": -1.9625988006591797,
554
- "logps/chosen": -32.91681671142578,
555
- "logps/rejected": -32.46485137939453,
556
- "loss": 0.7913,
557
- "rewards/accuracies": 0.7124999761581421,
558
- "rewards/chosen": 0.18264031410217285,
559
- "rewards/margins": 0.182492196559906,
560
- "rewards/rejected": 0.00014809667482040823,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
- "logits/chosen": -1.9189882278442383,
567
- "logits/rejected": -1.9293220043182373,
568
- "logps/chosen": -31.9406681060791,
569
- "logps/rejected": -35.306640625,
570
- "loss": 0.7574,
571
- "rewards/accuracies": 0.699999988079071,
572
- "rewards/chosen": 0.1320098638534546,
573
- "rewards/margins": 0.1409510374069214,
574
- "rewards/rejected": -0.008941170759499073,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
- "logits/chosen": -2.0582680702209473,
581
- "logits/rejected": -2.051753520965576,
582
- "logps/chosen": -33.39839553833008,
583
- "logps/rejected": -29.18343734741211,
584
- "loss": 0.7663,
585
- "rewards/accuracies": 0.675000011920929,
586
- "rewards/chosen": 0.12678876519203186,
587
- "rewards/margins": 0.1052960604429245,
588
- "rewards/rejected": 0.02149270847439766,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
- "logits/chosen": -1.9175922870635986,
595
- "logits/rejected": -1.9197555780410767,
596
- "logps/chosen": -33.878089904785156,
597
- "logps/rejected": -30.871530532836914,
598
- "loss": 0.6999,
599
- "rewards/accuracies": 0.737500011920929,
600
- "rewards/chosen": 0.18047982454299927,
601
- "rewards/margins": 0.17993128299713135,
602
- "rewards/rejected": 0.0005485474830493331,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
- "train_loss": 0.8446982934877469,
610
- "train_runtime": 3252.3399,
611
  "train_samples_per_second": 0.947,
612
  "train_steps_per_second": 0.118
613
  }
 
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
+ "logits/chosen": -1.8664768934249878,
29
+ "logits/rejected": -1.8707948923110962,
30
+ "logps/chosen": -36.978759765625,
31
+ "logps/rejected": -33.655174255371094,
32
+ "loss": 0.9735,
33
+ "rewards/accuracies": 0.5555555820465088,
34
+ "rewards/chosen": 0.01391848549246788,
35
+ "rewards/margins": 0.026468267664313316,
36
+ "rewards/rejected": -0.012549781240522861,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
+ "logits/chosen": -1.9980016946792603,
43
+ "logits/rejected": -2.000636339187622,
44
+ "logps/chosen": -29.645702362060547,
45
+ "logps/rejected": -29.045913696289062,
46
+ "loss": 1.0105,
47
+ "rewards/accuracies": 0.38749998807907104,
48
+ "rewards/chosen": -0.0017539471155032516,
49
+ "rewards/margins": -0.010518571361899376,
50
+ "rewards/rejected": 0.00876462459564209,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
+ "logits/chosen": -1.920964002609253,
57
+ "logits/rejected": -1.9182822704315186,
58
+ "logps/chosen": -31.409515380859375,
59
+ "logps/rejected": -33.23186492919922,
60
+ "loss": 0.99,
61
+ "rewards/accuracies": 0.5249999761581421,
62
+ "rewards/chosen": 0.0032895475160330534,
63
+ "rewards/margins": 0.009993741288781166,
64
+ "rewards/rejected": -0.006704193539917469,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
+ "logits/chosen": -2.0180246829986572,
71
+ "logits/rejected": -2.009291410446167,
72
+ "logps/chosen": -32.584468841552734,
73
+ "logps/rejected": -32.50810623168945,
74
+ "loss": 1.0061,
75
+ "rewards/accuracies": 0.44999998807907104,
76
+ "rewards/chosen": -0.0038314342964440584,
77
+ "rewards/margins": -0.006112524773925543,
78
+ "rewards/rejected": 0.002281090710312128,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
+ "logits/chosen": -1.8629133701324463,
85
+ "logits/rejected": -1.852142572402954,
86
+ "logps/chosen": -33.5595703125,
87
+ "logps/rejected": -35.455909729003906,
88
+ "loss": 0.9993,
89
+ "rewards/accuracies": 0.42500001192092896,
90
+ "rewards/chosen": -0.0010461390484124422,
91
+ "rewards/margins": 0.0007355377310886979,
92
+ "rewards/rejected": -0.0017816766630858183,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
+ "logits/chosen": -1.9417911767959595,
99
+ "logits/rejected": -1.9437233209609985,
100
+ "logps/chosen": -32.56698989868164,
101
+ "logps/rejected": -33.20896911621094,
102
+ "loss": 0.9691,
103
+ "rewards/accuracies": 0.5625,
104
+ "rewards/chosen": 0.01659708470106125,
105
+ "rewards/margins": 0.03539115563035011,
106
+ "rewards/rejected": -0.018794070929288864,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
+ "logits/chosen": -2.0726003646850586,
113
+ "logits/rejected": -2.077578544616699,
114
+ "logps/chosen": -33.98097610473633,
115
+ "logps/rejected": -36.61317825317383,
116
+ "loss": 0.9741,
117
+ "rewards/accuracies": 0.5625,
118
+ "rewards/chosen": 0.0037565291859209538,
119
+ "rewards/margins": 0.02590828575193882,
120
+ "rewards/rejected": -0.022151757031679153,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
+ "logits/chosen": -1.9339592456817627,
127
+ "logits/rejected": -1.9370877742767334,
128
+ "logps/chosen": -34.31932830810547,
129
+ "logps/rejected": -34.65187454223633,
130
+ "loss": 0.9169,
131
+ "rewards/accuracies": 0.6499999761581421,
132
+ "rewards/chosen": 0.05424312874674797,
133
+ "rewards/margins": 0.08397696167230606,
134
+ "rewards/rejected": -0.029733825474977493,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
+ "logits/chosen": -1.9409306049346924,
141
+ "logits/rejected": -1.9454383850097656,
142
+ "logps/chosen": -32.3923454284668,
143
+ "logps/rejected": -32.371299743652344,
144
+ "loss": 0.9563,
145
+ "rewards/accuracies": 0.5625,
146
+ "rewards/chosen": 0.037537358701229095,
147
+ "rewards/margins": 0.043734077364206314,
148
+ "rewards/rejected": -0.006196716334670782,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
+ "logits/chosen": -2.037719964981079,
155
+ "logits/rejected": -2.0357348918914795,
156
+ "logps/chosen": -32.144508361816406,
157
+ "logps/rejected": -31.295955657958984,
158
+ "loss": 0.937,
159
+ "rewards/accuracies": 0.637499988079071,
160
+ "rewards/chosen": 0.04816383868455887,
161
+ "rewards/margins": 0.06303636729717255,
162
+ "rewards/rejected": -0.014872525818645954,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
+ "eval_logits/chosen": -2.2321972846984863,
168
+ "eval_logits/rejected": -2.2273480892181396,
169
+ "eval_logps/chosen": -34.032928466796875,
170
+ "eval_logps/rejected": -37.53125,
171
+ "eval_loss": 0.9921085834503174,
172
+ "eval_rewards/accuracies": 0.5282392501831055,
173
+ "eval_rewards/chosen": 0.000813809223473072,
174
+ "eval_rewards/margins": 0.008130725473165512,
175
+ "eval_rewards/rejected": -0.007316915784031153,
176
+ "eval_runtime": 145.8006,
177
+ "eval_samples_per_second": 2.353,
178
  "eval_steps_per_second": 0.295,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
+ "logits/chosen": -1.9925286769866943,
185
+ "logits/rejected": -1.9901431798934937,
186
+ "logps/chosen": -33.11371994018555,
187
+ "logps/rejected": -34.028839111328125,
188
+ "loss": 0.9352,
189
+ "rewards/accuracies": 0.6000000238418579,
190
+ "rewards/chosen": 0.06504794210195541,
191
+ "rewards/margins": 0.06478062272071838,
192
+ "rewards/rejected": 0.0002673208655323833,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
+ "logits/chosen": -2.0043208599090576,
199
+ "logits/rejected": -1.9959907531738281,
200
+ "logps/chosen": -32.34038162231445,
201
+ "logps/rejected": -32.120628356933594,
202
+ "loss": 0.9686,
203
+ "rewards/accuracies": 0.5249999761581421,
204
+ "rewards/chosen": 0.05226168781518936,
205
+ "rewards/margins": 0.03182316944003105,
206
+ "rewards/rejected": 0.02043851651251316,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
+ "logits/chosen": -2.031864881515503,
213
+ "logits/rejected": -2.023920774459839,
214
+ "logps/chosen": -30.3155460357666,
215
+ "logps/rejected": -32.07514190673828,
216
+ "loss": 0.9237,
217
+ "rewards/accuracies": 0.574999988079071,
218
+ "rewards/chosen": 0.08008603006601334,
219
+ "rewards/margins": 0.09115206450223923,
220
+ "rewards/rejected": -0.011066052131354809,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
+ "logits/chosen": -1.9618200063705444,
227
+ "logits/rejected": -1.9720451831817627,
228
+ "logps/chosen": -31.228313446044922,
229
+ "logps/rejected": -32.562313079833984,
230
+ "loss": 0.8906,
231
+ "rewards/accuracies": 0.675000011920929,
232
+ "rewards/chosen": 0.09567264467477798,
233
+ "rewards/margins": 0.10938718169927597,
234
+ "rewards/rejected": -0.01371453981846571,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
+ "logits/chosen": -1.8728482723236084,
241
+ "logits/rejected": -1.874018669128418,
242
+ "logps/chosen": -33.88795852661133,
243
+ "logps/rejected": -34.77352523803711,
244
+ "loss": 0.852,
245
+ "rewards/accuracies": 0.637499988079071,
246
+ "rewards/chosen": 0.15236759185791016,
247
+ "rewards/margins": 0.16050228476524353,
248
+ "rewards/rejected": -0.00813469011336565,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
+ "logits/chosen": -1.923365592956543,
255
+ "logits/rejected": -1.9199578762054443,
256
+ "logps/chosen": -35.98258590698242,
257
+ "logps/rejected": -32.678192138671875,
258
+ "loss": 0.9277,
259
+ "rewards/accuracies": 0.625,
260
+ "rewards/chosen": 0.09454482793807983,
261
+ "rewards/margins": 0.07226438820362091,
262
+ "rewards/rejected": 0.02228044718503952,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
+ "logits/chosen": -2.0240864753723145,
269
+ "logits/rejected": -2.0167737007141113,
270
+ "logps/chosen": -33.471466064453125,
271
+ "logps/rejected": -31.40371322631836,
272
+ "loss": 0.8154,
273
+ "rewards/accuracies": 0.6625000238418579,
274
+ "rewards/chosen": 0.16319802403450012,
275
+ "rewards/margins": 0.18913087248802185,
276
+ "rewards/rejected": -0.025932863354682922,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
+ "logits/chosen": -2.0294885635375977,
283
+ "logits/rejected": -2.034759044647217,
284
+ "logps/chosen": -32.19446563720703,
285
+ "logps/rejected": -32.4196891784668,
286
+ "loss": 0.8617,
287
+ "rewards/accuracies": 0.6875,
288
+ "rewards/chosen": 0.17516280710697174,
289
+ "rewards/margins": 0.1424373984336853,
290
+ "rewards/rejected": 0.03272542357444763,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
+ "logits/chosen": -2.0309464931488037,
297
+ "logits/rejected": -2.028186798095703,
298
+ "logps/chosen": -31.26650619506836,
299
+ "logps/rejected": -31.340648651123047,
300
+ "loss": 0.8776,
301
+ "rewards/accuracies": 0.6875,
302
+ "rewards/chosen": 0.1112222671508789,
303
+ "rewards/margins": 0.1224404126405716,
304
+ "rewards/rejected": -0.011218142695724964,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
+ "logits/chosen": -1.9006068706512451,
311
+ "logits/rejected": -1.9052391052246094,
312
+ "logps/chosen": -31.295886993408203,
313
+ "logps/rejected": -32.81926345825195,
314
+ "loss": 0.8332,
315
+ "rewards/accuracies": 0.737500011920929,
316
+ "rewards/chosen": 0.15519794821739197,
317
+ "rewards/margins": 0.16936282813549042,
318
+ "rewards/rejected": -0.01416487991809845,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
+ "eval_logits/chosen": -2.2281434535980225,
324
+ "eval_logits/rejected": -2.223317861557007,
325
+ "eval_logps/chosen": -34.03767013549805,
326
+ "eval_logps/rejected": -37.552223205566406,
327
+ "eval_loss": 0.983903169631958,
328
+ "eval_rewards/accuracies": 0.5307309031486511,
329
+ "eval_rewards/chosen": -0.001558887423016131,
330
+ "eval_rewards/margins": 0.016243286430835724,
331
+ "eval_rewards/rejected": -0.017802175134420395,
332
+ "eval_runtime": 145.6923,
333
+ "eval_samples_per_second": 2.354,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
+ "logits/chosen": -2.013721466064453,
341
+ "logits/rejected": -2.0243561267852783,
342
+ "logps/chosen": -31.78135108947754,
343
+ "logps/rejected": -33.93622970581055,
344
+ "loss": 0.8758,
345
+ "rewards/accuracies": 0.675000011920929,
346
+ "rewards/chosen": 0.09497732669115067,
347
+ "rewards/margins": 0.1317446529865265,
348
+ "rewards/rejected": -0.036767326295375824,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
+ "logits/chosen": -1.9061295986175537,
355
+ "logits/rejected": -1.9209282398223877,
356
+ "logps/chosen": -29.796966552734375,
357
+ "logps/rejected": -31.621959686279297,
358
+ "loss": 0.8123,
359
+ "rewards/accuracies": 0.7124999761581421,
360
+ "rewards/chosen": 0.15728648006916046,
361
+ "rewards/margins": 0.18917784094810486,
362
+ "rewards/rejected": -0.03189137205481529,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
+ "logits/chosen": -1.9621093273162842,
369
+ "logits/rejected": -1.9660680294036865,
370
+ "logps/chosen": -33.10811233520508,
371
+ "logps/rejected": -31.64517593383789,
372
+ "loss": 0.8222,
373
+ "rewards/accuracies": 0.675000011920929,
374
+ "rewards/chosen": 0.15467897057533264,
375
+ "rewards/margins": 0.20261602103710175,
376
+ "rewards/rejected": -0.04793703928589821,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
+ "logits/chosen": -1.9597173929214478,
383
+ "logits/rejected": -1.9379403591156006,
384
+ "logps/chosen": -33.83074188232422,
385
+ "logps/rejected": -35.11478042602539,
386
+ "loss": 0.7755,
387
+ "rewards/accuracies": 0.7124999761581421,
388
+ "rewards/chosen": 0.16150443255901337,
389
+ "rewards/margins": 0.24349848926067352,
390
+ "rewards/rejected": -0.08199407160282135,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
+ "logits/chosen": -2.001469135284424,
397
+ "logits/rejected": -1.998147964477539,
398
+ "logps/chosen": -32.717472076416016,
399
+ "logps/rejected": -36.230865478515625,
400
+ "loss": 0.8965,
401
+ "rewards/accuracies": 0.5375000238418579,
402
+ "rewards/chosen": 0.10379841178655624,
403
+ "rewards/margins": 0.10677065700292587,
404
+ "rewards/rejected": -0.0029722482431679964,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
+ "logits/chosen": -1.8687664270401,
411
+ "logits/rejected": -1.8663409948349,
412
+ "logps/chosen": -33.96764373779297,
413
+ "logps/rejected": -35.5003776550293,
414
+ "loss": 0.8917,
415
+ "rewards/accuracies": 0.637499988079071,
416
+ "rewards/chosen": 0.11037871986627579,
417
+ "rewards/margins": 0.10825767368078232,
418
+ "rewards/rejected": 0.0021210461854934692,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
+ "logits/chosen": -1.8529716730117798,
425
+ "logits/rejected": -1.8505760431289673,
426
+ "logps/chosen": -34.18817901611328,
427
+ "logps/rejected": -31.831729888916016,
428
+ "loss": 0.8701,
429
+ "rewards/accuracies": 0.574999988079071,
430
+ "rewards/chosen": 0.1047089546918869,
431
+ "rewards/margins": 0.1349087655544281,
432
+ "rewards/rejected": -0.0301998071372509,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
+ "logits/chosen": -1.9559904336929321,
439
+ "logits/rejected": -1.9455276727676392,
440
+ "logps/chosen": -34.98174285888672,
441
+ "logps/rejected": -31.862768173217773,
442
+ "loss": 0.8017,
443
+ "rewards/accuracies": 0.7250000238418579,
444
+ "rewards/chosen": 0.18744894862174988,
445
+ "rewards/margins": 0.1983364075422287,
446
+ "rewards/rejected": -0.010887444019317627,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
+ "logits/chosen": -2.0518603324890137,
453
+ "logits/rejected": -2.0369694232940674,
454
+ "logps/chosen": -30.698474884033203,
455
+ "logps/rejected": -32.61089324951172,
456
+ "loss": 0.9111,
457
+ "rewards/accuracies": 0.612500011920929,
458
+ "rewards/chosen": 0.11282563209533691,
459
+ "rewards/margins": 0.09235484153032303,
460
+ "rewards/rejected": 0.02047080174088478,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
+ "logits/chosen": -1.9218803644180298,
467
+ "logits/rejected": -1.9193775653839111,
468
+ "logps/chosen": -32.38130569458008,
469
+ "logps/rejected": -30.906925201416016,
470
+ "loss": 0.7284,
471
+ "rewards/accuracies": 0.800000011920929,
472
+ "rewards/chosen": 0.27290263772010803,
473
+ "rewards/margins": 0.3179616630077362,
474
+ "rewards/rejected": -0.04505903273820877,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
+ "eval_logits/chosen": -2.222837209701538,
480
+ "eval_logits/rejected": -2.218010663986206,
481
+ "eval_logps/chosen": -34.05692672729492,
482
+ "eval_logps/rejected": -37.573402404785156,
483
+ "eval_loss": 0.9828158617019653,
484
+ "eval_rewards/accuracies": 0.5394518375396729,
485
+ "eval_rewards/chosen": -0.011187080293893814,
486
+ "eval_rewards/margins": 0.017203383147716522,
487
+ "eval_rewards/rejected": -0.028390465304255486,
488
+ "eval_runtime": 145.822,
489
+ "eval_samples_per_second": 2.352,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
+ "logits/chosen": -1.9078595638275146,
497
+ "logits/rejected": -1.90461003780365,
498
+ "logps/chosen": -31.29986000061035,
499
+ "logps/rejected": -33.81876754760742,
500
+ "loss": 0.8259,
501
+ "rewards/accuracies": 0.737500011920929,
502
+ "rewards/chosen": 0.15174436569213867,
503
+ "rewards/margins": 0.19005274772644043,
504
+ "rewards/rejected": -0.03830839321017265,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
+ "logits/chosen": -1.957113265991211,
511
+ "logits/rejected": -1.9449272155761719,
512
+ "logps/chosen": -34.27944564819336,
513
+ "logps/rejected": -33.6619873046875,
514
+ "loss": 0.7968,
515
+ "rewards/accuracies": 0.6499999761581421,
516
+ "rewards/chosen": 0.15831336379051208,
517
+ "rewards/margins": 0.2198868989944458,
518
+ "rewards/rejected": -0.06157354637980461,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
+ "logits/chosen": -1.9918349981307983,
525
+ "logits/rejected": -1.9904123544692993,
526
+ "logps/chosen": -33.15383529663086,
527
+ "logps/rejected": -32.51991653442383,
528
+ "loss": 0.8186,
529
+ "rewards/accuracies": 0.6875,
530
+ "rewards/chosen": 0.16597984731197357,
531
+ "rewards/margins": 0.18614766001701355,
532
+ "rewards/rejected": -0.020167803391814232,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
+ "logits/chosen": -2.0784294605255127,
539
+ "logits/rejected": -2.0627849102020264,
540
+ "logps/chosen": -33.7851448059082,
541
+ "logps/rejected": -33.072959899902344,
542
+ "loss": 0.8271,
543
+ "rewards/accuracies": 0.675000011920929,
544
+ "rewards/chosen": 0.1971231997013092,
545
+ "rewards/margins": 0.17458374798297882,
546
+ "rewards/rejected": 0.022539447993040085,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
+ "logits/chosen": -1.9518101215362549,
553
+ "logits/rejected": -1.9509942531585693,
554
+ "logps/chosen": -32.82368850708008,
555
+ "logps/rejected": -32.53012466430664,
556
+ "loss": 0.7648,
557
+ "rewards/accuracies": 0.6875,
558
+ "rewards/chosen": 0.22920279204845428,
559
+ "rewards/margins": 0.2616921663284302,
560
+ "rewards/rejected": -0.03248937055468559,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
+ "logits/chosen": -1.9068868160247803,
567
+ "logits/rejected": -1.91714608669281,
568
+ "logps/chosen": -31.853801727294922,
569
+ "logps/rejected": -35.30488967895508,
570
+ "loss": 0.8248,
571
+ "rewards/accuracies": 0.637499988079071,
572
+ "rewards/chosen": 0.17544391751289368,
573
+ "rewards/margins": 0.18350856006145477,
574
+ "rewards/rejected": -0.008064627647399902,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
+ "logits/chosen": -2.046671152114868,
581
+ "logits/rejected": -2.040235996246338,
582
+ "logps/chosen": -33.29608917236328,
583
+ "logps/rejected": -29.24057388305664,
584
+ "loss": 0.8215,
585
+ "rewards/accuracies": 0.699999988079071,
586
+ "rewards/chosen": 0.17794036865234375,
587
+ "rewards/margins": 0.1850142776966095,
588
+ "rewards/rejected": -0.007073921151459217,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
+ "logits/chosen": -1.9064124822616577,
595
+ "logits/rejected": -1.908599853515625,
596
+ "logps/chosen": -33.871421813964844,
597
+ "logps/rejected": -30.974201202392578,
598
+ "loss": 0.7824,
599
+ "rewards/accuracies": 0.7124999761581421,
600
+ "rewards/chosen": 0.18381480872631073,
601
+ "rewards/margins": 0.23460063338279724,
602
+ "rewards/rejected": -0.05078582838177681,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
+ "train_loss": 0.8806782920639237,
610
+ "train_runtime": 3251.4322,
611
  "train_samples_per_second": 0.947,
612
  "train_steps_per_second": 0.118
613
  }
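
Most of the diff is `trainer_state.json`, whose `log_history` entries carry the per-step DPO metrics (loss, rewards/chosen, rewards/margins) plus the periodic `eval_*` records at steps 100, 200 and 300. A minimal sketch of splitting those entries for inspection or plotting, assuming the file has been downloaded locally:

```python
# Sketch: read trainer_state.json and separate training steps from eval records.
# Key names match the entries shown in the diff; the local path is assumed.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

train_logs = [e for e in state["log_history"] if "loss" in e]       # per-step training metrics
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]   # periodic evaluation records

for e in eval_logs:
    print(f'step {e["step"]}: eval_loss={e["eval_loss"]:.4f} '
          f'margin={e["eval_rewards/margins"]:.4f} '
          f'acc={e["eval_rewards/accuracies"]:.4f}')
```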