hugodk-sch committed on
Commit bf3851f
1 Parent(s): 2d61f63

Model save

Files changed (5)
  1. README.md +13 -16
  2. adapter_model.safetensors +1 -1
  3. all_results.json +3 -16
  4. train_results.json +3 -3
  5. trainer_state.json +377 -377
README.md CHANGED
@@ -1,13 +1,10 @@
  ---
  library_name: peft
  tags:
- - alignment-handbook
  - trl
  - dpo
  - generated_from_trainer
  base_model: NbAiLab/nb-gpt-j-6B-v2
- datasets:
- - hugodk-sch/aftonposten_title_prefs
  model-index:
  - name: aftonposten-6b-align-scan
    results: []
@@ -18,17 +15,17 @@ should probably proofread and complete it, then remove this comment. -->

  # aftonposten-6b-align-scan

- This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
+ This model is a fine-tuned version of [NbAiLab/nb-gpt-j-6B-v2](https://huggingface.co/NbAiLab/nb-gpt-j-6B-v2) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.6931
- - Rewards/chosen: -0.0260
- - Rewards/rejected: -0.0334
- - Rewards/accuracies: 0.5071
- - Rewards/margins: 0.0074
- - Logps/rejected: -37.6001
- - Logps/chosen: -34.0996
- - Logits/rejected: -2.2227
- - Logits/chosen: -2.2275
+ - Loss: 0.4962
+ - Rewards/chosen: 0.1141
+ - Rewards/rejected: 0.0985
+ - Rewards/accuracies: 0.5357
+ - Rewards/margins: 0.0156
+ - Logps/rejected: -37.2704
+ - Logps/chosen: -33.7494
+ - Logits/rejected: -2.2270
+ - Logits/chosen: -2.2318

  ## Model description

@@ -63,9 +60,9 @@ The following hyperparameters were used during training:

  | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
  |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
- | 0.6726 | 0.26 | 100 | 0.6903 | -0.0039 | -0.0143 | 0.5482 | 0.0105 | -37.5524 | -34.0442 | -2.2286 | -2.2334 |
- | 0.6419 | 0.52 | 200 | 0.6932 | -0.0126 | -0.0196 | 0.5042 | 0.0071 | -37.5657 | -34.0660 | -2.2261 | -2.2310 |
- | 0.5902 | 0.78 | 300 | 0.6926 | -0.0248 | -0.0328 | 0.4954 | 0.0080 | -37.5987 | -34.0966 | -2.2231 | -2.2279 |
+ | 0.4845 | 0.26 | 100 | 0.4982 | 0.0465 | 0.0373 | 0.5511 | 0.0092 | -37.4234 | -33.9184 | -2.2298 | -2.2347 |
+ | 0.466 | 0.52 | 200 | 0.4965 | 0.1019 | 0.0871 | 0.5453 | 0.0148 | -37.2989 | -33.7799 | -2.2288 | -2.2337 |
+ | 0.4349 | 0.78 | 300 | 0.4962 | 0.1141 | 0.0985 | 0.5357 | 0.0156 | -37.2704 | -33.7494 | -2.2270 | -2.2318 |


  ### Framework versions
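
For context, the card above describes a PEFT (LoRA) adapter trained with TRL's DPO trainer on top of NbAiLab/nb-gpt-j-6B-v2. Below is a minimal sketch, not part of this commit, of how such an adapter is typically loaded; the adapter repo id is assumed from the model name in the card and may differ.

```python
# Hedged sketch: load the LoRA adapter saved in this commit on top of its base model.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "NbAiLab/nb-gpt-j-6B-v2"                    # base model named in the updated card
adapter_id = "hugodk-sch/aftonposten-6b-align-scan"   # assumed adapter repo id (from the model name)

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id)
model = PeftModel.from_pretrained(base, adapter_id)   # applies adapter_model.safetensors
model.eval()
```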
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c83758ade092bee7a7c47d5df91f1f7738a7e7c23657f9385413031647727945
+ oid sha256:7e5eebef15a07da863c5fc08cc2014b1ba6f08bebfa75043de56daeec484862e
  size 176183216
all_results.json CHANGED
@@ -1,21 +1,8 @@
  {
  "epoch": 1.0,
- "eval_logits/chosen": -2.2275288105010986,
- "eval_logits/rejected": -2.2226884365081787,
- "eval_logps/chosen": -34.09955978393555,
- "eval_logps/rejected": -37.600067138671875,
- "eval_loss": 0.693101167678833,
- "eval_rewards/accuracies": 0.5070598125457764,
- "eval_rewards/chosen": -0.026001954451203346,
- "eval_rewards/margins": 0.007375751622021198,
- "eval_rewards/rejected": -0.03337770700454712,
- "eval_runtime": 145.5528,
- "eval_samples": 343,
- "eval_samples_per_second": 2.357,
- "eval_steps_per_second": 0.295,
- "train_loss": 0.6506414413452148,
- "train_runtime": 3254.0644,
+ "train_loss": 0.4751699732495593,
+ "train_runtime": 3253.0203,
  "train_samples": 3079,
- "train_samples_per_second": 0.946,
+ "train_samples_per_second": 0.947,
  "train_steps_per_second": 0.118
  }
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
  "epoch": 1.0,
- "train_loss": 0.6506414413452148,
- "train_runtime": 3254.0644,
+ "train_loss": 0.4751699732495593,
+ "train_runtime": 3253.0203,
  "train_samples": 3079,
- "train_samples_per_second": 0.946,
+ "train_samples_per_second": 0.947,
  "train_steps_per_second": 0.118
  }
trainer_state.json CHANGED
@@ -15,7 +15,7 @@
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
- "loss": 0.6931,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
@@ -25,590 +25,590 @@
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
- "logits/chosen": -1.8668746948242188,
29
- "logits/rejected": -1.8712046146392822,
30
- "logps/chosen": -36.99528884887695,
31
- "logps/rejected": -33.6615104675293,
32
- "loss": 0.6855,
33
- "rewards/accuracies": 0.5,
34
- "rewards/chosen": 0.004523592535406351,
35
- "rewards/margins": 0.017096158117055893,
36
- "rewards/rejected": -0.012572565115988255,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
- "logits/chosen": -1.9979562759399414,
43
- "logits/rejected": -2.000598907470703,
44
- "logps/chosen": -29.644357681274414,
45
- "logps/rejected": -29.06288719177246,
46
- "loss": 0.6942,
47
- "rewards/accuracies": 0.44999998807907104,
48
- "rewards/chosen": -0.0008665517088957131,
49
- "rewards/margins": -0.0010885533411055803,
50
- "rewards/rejected": 0.00022200122475624084,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
- "logits/chosen": -1.9213365316390991,
57
- "logits/rejected": -1.9186455011367798,
58
- "logps/chosen": -31.3991756439209,
59
- "logps/rejected": -33.220787048339844,
60
- "loss": 0.6903,
61
- "rewards/accuracies": 0.625,
62
- "rewards/chosen": 0.006767785642296076,
63
- "rewards/margins": 0.007698210421949625,
64
- "rewards/rejected": -0.0009304238483309746,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
- "logits/chosen": -2.0176024436950684,
71
- "logits/rejected": -2.008852481842041,
72
- "logps/chosen": -32.565155029296875,
73
- "logps/rejected": -32.51045608520508,
74
- "loss": 0.6924,
75
- "rewards/accuracies": 0.5249999761581421,
76
- "rewards/chosen": 0.004661071114242077,
77
- "rewards/margins": 0.0037749619223177433,
78
- "rewards/rejected": 0.0008861090755090117,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
- "logits/chosen": -1.8624731302261353,
85
- "logits/rejected": -1.8516931533813477,
86
- "logps/chosen": -33.547359466552734,
87
- "logps/rejected": -35.463809967041016,
88
- "loss": 0.69,
89
- "rewards/accuracies": 0.5249999761581421,
90
- "rewards/chosen": 0.004046988673508167,
91
- "rewards/margins": 0.008634108118712902,
92
- "rewards/rejected": -0.004587120376527309,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
- "logits/chosen": -1.9406204223632812,
99
- "logits/rejected": -1.9425837993621826,
100
- "logps/chosen": -32.54151153564453,
101
- "logps/rejected": -33.21025848388672,
102
- "loss": 0.6772,
103
- "rewards/accuracies": 0.5874999761581421,
104
- "rewards/chosen": 0.023469632491469383,
105
- "rewards/margins": 0.03902136906981468,
106
- "rewards/rejected": -0.015551735647022724,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
- "logits/chosen": -2.0720112323760986,
113
- "logits/rejected": -2.0769832134246826,
114
- "logps/chosen": -33.98130416870117,
115
- "logps/rejected": -36.64153289794922,
116
- "loss": 0.6804,
117
- "rewards/accuracies": 0.5625,
118
- "rewards/chosen": 0.0028717622626572847,
119
- "rewards/margins": 0.03193504735827446,
120
- "rewards/rejected": -0.029063284397125244,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
- "logits/chosen": -1.9326984882354736,
127
- "logits/rejected": -1.935831069946289,
128
- "logps/chosen": -34.31356430053711,
129
- "logps/rejected": -34.65351867675781,
130
- "loss": 0.6626,
131
- "rewards/accuracies": 0.6499999761581421,
132
- "rewards/chosen": 0.045696478337049484,
133
- "rewards/margins": 0.07014231383800507,
134
- "rewards/rejected": -0.02444584295153618,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
- "logits/chosen": -1.9407522678375244,
141
- "logits/rejected": -1.9452556371688843,
142
- "logps/chosen": -32.40108108520508,
143
- "logps/rejected": -32.342872619628906,
144
- "loss": 0.6859,
145
- "rewards/accuracies": 0.5874999761581421,
146
- "rewards/chosen": 0.02653699554502964,
147
- "rewards/margins": 0.020122777670621872,
148
- "rewards/rejected": 0.0064142136834561825,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
- "logits/chosen": -2.0378258228302,
155
- "logits/rejected": -2.0358424186706543,
156
- "logps/chosen": -32.15534973144531,
157
- "logps/rejected": -31.297805786132812,
158
- "loss": 0.6726,
159
- "rewards/accuracies": 0.612500011920929,
160
- "rewards/chosen": 0.03419475629925728,
161
- "rewards/margins": 0.04683210328221321,
162
- "rewards/rejected": -0.01263735257089138,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
- "eval_logits/chosen": -2.233442544937134,
168
- "eval_logits/rejected": -2.2285873889923096,
169
- "eval_logps/chosen": -34.044193267822266,
170
- "eval_logps/rejected": -37.55242919921875,
171
- "eval_loss": 0.6902939081192017,
172
- "eval_rewards/accuracies": 0.5481727719306946,
173
- "eval_rewards/chosen": -0.0038560593966394663,
174
- "eval_rewards/margins": 0.010466905310750008,
175
- "eval_rewards/rejected": -0.014322965405881405,
176
- "eval_runtime": 146.038,
177
- "eval_samples_per_second": 2.349,
178
- "eval_steps_per_second": 0.294,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
- "logits/chosen": -1.9928157329559326,
185
- "logits/rejected": -1.9904266595840454,
186
- "logps/chosen": -33.1363410949707,
187
- "logps/rejected": -34.00283432006836,
188
- "loss": 0.6883,
189
- "rewards/accuracies": 0.5625,
190
- "rewards/chosen": 0.04299246892333031,
191
- "rewards/margins": 0.03237856179475784,
192
- "rewards/rejected": 0.010613908991217613,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
- "logits/chosen": -2.0046679973602295,
199
- "logits/rejected": -1.9963252544403076,
200
- "logps/chosen": -32.3227653503418,
201
- "logps/rejected": -32.157779693603516,
202
- "loss": 0.6741,
203
- "rewards/accuracies": 0.5625,
204
- "rewards/chosen": 0.04885613173246384,
205
- "rewards/margins": 0.0473661907017231,
206
- "rewards/rejected": 0.0014899425441399217,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
- "logits/chosen": -2.0337133407592773,
213
- "logits/rejected": -2.0257389545440674,
214
- "logps/chosen": -30.330951690673828,
215
- "logps/rejected": -32.077327728271484,
216
- "loss": 0.6662,
217
- "rewards/accuracies": 0.612500011920929,
218
- "rewards/chosen": 0.05790700390934944,
219
- "rewards/margins": 0.06763499230146408,
220
- "rewards/rejected": -0.00972799677401781,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
- "logits/chosen": -1.9636684656143188,
227
- "logits/rejected": -1.9739125967025757,
228
- "logps/chosen": -31.2012996673584,
229
- "logps/rejected": -32.56267547607422,
230
- "loss": 0.6505,
231
- "rewards/accuracies": 0.699999988079071,
232
- "rewards/chosen": 0.08734508603811264,
233
- "rewards/margins": 0.09846383333206177,
234
- "rewards/rejected": -0.011118754744529724,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
- "logits/chosen": -1.8753896951675415,
241
- "logits/rejected": -1.8765614032745361,
242
- "logps/chosen": -33.930477142333984,
243
- "logps/rejected": -34.81265640258789,
244
- "loss": 0.6411,
245
- "rewards/accuracies": 0.637499988079071,
246
- "rewards/chosen": 0.10488543659448624,
247
- "rewards/margins": 0.1270444542169571,
248
- "rewards/rejected": -0.022159017622470856,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
- "logits/chosen": -1.9266326427459717,
255
- "logits/rejected": -1.9231981039047241,
256
- "logps/chosen": -36.001312255859375,
257
- "logps/rejected": -32.70484924316406,
258
- "loss": 0.6671,
259
- "rewards/accuracies": 0.574999988079071,
260
- "rewards/chosen": 0.0681454986333847,
261
- "rewards/margins": 0.06098458915948868,
262
- "rewards/rejected": 0.007160906679928303,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
- "logits/chosen": -2.0281758308410645,
269
- "logits/rejected": -2.0208277702331543,
270
- "logps/chosen": -33.47995376586914,
271
- "logps/rejected": -31.4173526763916,
272
- "loss": 0.6278,
273
- "rewards/accuracies": 0.6625000238418579,
274
- "rewards/chosen": 0.12716376781463623,
275
- "rewards/margins": 0.15336360037326813,
276
- "rewards/rejected": -0.026199836283922195,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
- "logits/chosen": -2.033982038497925,
283
- "logits/rejected": -2.0392203330993652,
284
- "logps/chosen": -32.239437103271484,
285
- "logps/rejected": -32.44209671020508,
286
- "loss": 0.6474,
287
- "rewards/accuracies": 0.7250000238418579,
288
- "rewards/chosen": 0.12213903665542603,
289
- "rewards/margins": 0.10492189973592758,
290
- "rewards/rejected": 0.017217133194208145,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
- "logits/chosen": -2.034848928451538,
297
- "logits/rejected": -2.032095432281494,
298
- "logps/chosen": -31.259105682373047,
299
- "logps/rejected": -31.31390953063965,
300
- "loss": 0.6567,
301
- "rewards/accuracies": 0.612500011920929,
302
- "rewards/chosen": 0.09193893522024155,
303
- "rewards/margins": 0.09021683037281036,
304
- "rewards/rejected": 0.0017221048474311829,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
- "logits/chosen": -1.9051767587661743,
311
- "logits/rejected": -1.9098323583602905,
312
- "logps/chosen": -31.32687759399414,
313
- "logps/rejected": -32.82324981689453,
314
- "loss": 0.6419,
315
  "rewards/accuracies": 0.7250000238418579,
316
- "rewards/chosen": 0.1117621660232544,
317
- "rewards/margins": 0.12468986213207245,
318
- "rewards/rejected": -0.012927706353366375,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
- "eval_logits/chosen": -2.2309587001800537,
324
- "eval_logits/rejected": -2.2261300086975098,
325
- "eval_logps/chosen": -34.06596374511719,
326
- "eval_logps/rejected": -37.5656852722168,
327
- "eval_loss": 0.6932108402252197,
328
- "eval_rewards/accuracies": 0.5041528344154358,
329
- "eval_rewards/chosen": -0.0125651890411973,
330
- "eval_rewards/margins": 0.007060302421450615,
331
- "eval_rewards/rejected": -0.01962549053132534,
332
- "eval_runtime": 145.8797,
333
- "eval_samples_per_second": 2.351,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
- "logits/chosen": -2.0179035663604736,
341
- "logits/rejected": -2.0285422801971436,
342
- "logps/chosen": -31.756057739257812,
343
- "logps/rejected": -33.936058044433594,
344
- "loss": 0.6444,
345
- "rewards/accuracies": 0.637499988079071,
346
- "rewards/chosen": 0.08609838038682938,
347
- "rewards/margins": 0.11544252932071686,
348
- "rewards/rejected": -0.029344135895371437,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
- "logits/chosen": -1.910278081893921,
355
- "logits/rejected": -1.9250171184539795,
356
- "logps/chosen": -29.805334091186523,
357
- "logps/rejected": -31.600574493408203,
358
- "loss": 0.6326,
359
  "rewards/accuracies": 0.699999988079071,
360
- "rewards/chosen": 0.1224818006157875,
361
- "rewards/margins": 0.139441579580307,
362
- "rewards/rejected": -0.016959769651293755,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
- "logits/chosen": -1.9668325185775757,
369
- "logits/rejected": -1.970798134803772,
370
- "logps/chosen": -33.078094482421875,
371
- "logps/rejected": -31.6514949798584,
372
- "loss": 0.621,
373
- "rewards/accuracies": 0.6875,
374
- "rewards/chosen": 0.13575060665607452,
375
- "rewards/margins": 0.1766270101070404,
376
- "rewards/rejected": -0.04087639972567558,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
- "logits/chosen": -1.965409278869629,
383
- "logits/rejected": -1.9435851573944092,
384
- "logps/chosen": -33.83237075805664,
385
- "logps/rejected": -35.09648132324219,
386
- "loss": 0.6165,
387
- "rewards/accuracies": 0.7250000238418579,
388
- "rewards/chosen": 0.12855152785778046,
389
- "rewards/margins": 0.18682697415351868,
390
- "rewards/rejected": -0.05827543884515762,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
- "logits/chosen": -2.006166696548462,
397
- "logits/rejected": -2.002840042114258,
398
- "logps/chosen": -32.69712448120117,
399
- "logps/rejected": -36.255043029785156,
400
- "loss": 0.6502,
401
- "rewards/accuracies": 0.625,
402
- "rewards/chosen": 0.09117679297924042,
403
- "rewards/margins": 0.10322580486536026,
404
- "rewards/rejected": -0.01204901933670044,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
- "logits/chosen": -1.8737766742706299,
411
- "logits/rejected": -1.87137770652771,
412
- "logps/chosen": -33.97405242919922,
413
- "logps/rejected": -35.56011962890625,
414
- "loss": 0.6473,
415
- "rewards/accuracies": 0.7250000238418579,
416
- "rewards/chosen": 0.08574129641056061,
417
- "rewards/margins": 0.1079399362206459,
418
- "rewards/rejected": -0.022198637947440147,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
- "logits/chosen": -1.8586937189102173,
425
- "logits/rejected": -1.8563038110733032,
426
- "logps/chosen": -34.204627990722656,
427
- "logps/rejected": -31.85955810546875,
428
- "loss": 0.6478,
429
- "rewards/accuracies": 0.675000011920929,
430
- "rewards/chosen": 0.07718975841999054,
431
- "rewards/margins": 0.11248154938220978,
432
- "rewards/rejected": -0.03529178351163864,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
- "logits/chosen": -1.962044358253479,
439
- "logits/rejected": -1.9515289068222046,
440
- "logps/chosen": -35.0092887878418,
441
- "logps/rejected": -31.877635955810547,
442
- "loss": 0.6267,
443
- "rewards/accuracies": 0.7250000238418579,
444
- "rewards/chosen": 0.13894058763980865,
445
- "rewards/margins": 0.15359947085380554,
446
- "rewards/rejected": -0.01465887576341629,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
- "logits/chosen": -2.0572900772094727,
453
- "logits/rejected": -2.0424036979675293,
454
- "logps/chosen": -30.72947120666504,
455
- "logps/rejected": -32.645362854003906,
456
- "loss": 0.6657,
457
  "rewards/accuracies": 0.625,
458
- "rewards/chosen": 0.07786226272583008,
459
- "rewards/margins": 0.07527298480272293,
460
- "rewards/rejected": 0.002589278621599078,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
- "logits/chosen": -1.9281587600708008,
467
- "logits/rejected": -1.9256232976913452,
468
- "logps/chosen": -32.37285614013672,
469
- "logps/rejected": -30.912500381469727,
470
- "loss": 0.5902,
471
- "rewards/accuracies": 0.75,
472
- "rewards/chosen": 0.22170230746269226,
473
- "rewards/margins": 0.2599778473377228,
474
- "rewards/rejected": -0.03827553242444992,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
- "eval_logits/chosen": -2.2279410362243652,
480
- "eval_logits/rejected": -2.2231125831604004,
481
- "eval_logps/chosen": -34.09661865234375,
482
- "eval_logps/rejected": -37.598716735839844,
483
- "eval_loss": 0.6926039457321167,
484
- "eval_rewards/accuracies": 0.49543190002441406,
485
- "eval_rewards/chosen": -0.024825766682624817,
486
- "eval_rewards/margins": 0.008013932965695858,
487
- "eval_rewards/rejected": -0.03283970057964325,
488
- "eval_runtime": 145.6649,
489
- "eval_samples_per_second": 2.355,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
- "logits/chosen": -1.9122984409332275,
497
- "logits/rejected": -1.9090602397918701,
498
- "logps/chosen": -31.30501365661621,
499
- "logps/rejected": -33.817237854003906,
500
- "loss": 0.6314,
501
- "rewards/accuracies": 0.75,
502
- "rewards/chosen": 0.11933410167694092,
503
- "rewards/margins": 0.149368017911911,
504
- "rewards/rejected": -0.030033910647034645,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
- "logits/chosen": -1.962451696395874,
511
- "logits/rejected": -1.950260877609253,
512
- "logps/chosen": -34.321712493896484,
513
- "logps/rejected": -33.68184280395508,
514
- "loss": 0.6222,
515
- "rewards/accuracies": 0.699999988079071,
516
- "rewards/chosen": 0.10974420607089996,
517
- "rewards/margins": 0.16694548726081848,
518
- "rewards/rejected": -0.0572013333439827,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
- "logits/chosen": -1.9975078105926514,
525
- "logits/rejected": -1.996080994606018,
526
- "logps/chosen": -33.17241287231445,
527
- "logps/rejected": -32.53407669067383,
528
- "loss": 0.6313,
529
- "rewards/accuracies": 0.75,
530
- "rewards/chosen": 0.12535209953784943,
531
- "rewards/margins": 0.14715085923671722,
532
- "rewards/rejected": -0.021798742935061455,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
- "logits/chosen": -2.0841541290283203,
539
- "logits/rejected": -2.0684666633605957,
540
- "logps/chosen": -33.79545211791992,
541
- "logps/rejected": -33.1196174621582,
542
- "loss": 0.6276,
543
- "rewards/accuracies": 0.6875,
544
- "rewards/chosen": 0.15357479453086853,
545
- "rewards/margins": 0.15420618653297424,
546
- "rewards/rejected": -0.000631416798569262,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
- "logits/chosen": -1.9566657543182373,
553
- "logits/rejected": -1.9558334350585938,
554
- "logps/chosen": -32.842193603515625,
555
- "logps/rejected": -32.53485870361328,
556
- "loss": 0.6121,
557
- "rewards/accuracies": 0.6625000238418579,
558
- "rewards/chosen": 0.17596027255058289,
559
- "rewards/margins": 0.2038455456495285,
560
- "rewards/rejected": -0.027885273098945618,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
- "logits/chosen": -1.9120171070098877,
567
- "logits/rejected": -1.9222869873046875,
568
- "logps/chosen": -31.887542724609375,
569
- "logps/rejected": -35.35187911987305,
570
- "loss": 0.6293,
571
- "rewards/accuracies": 0.6875,
572
- "rewards/chosen": 0.12685921788215637,
573
- "rewards/margins": 0.15210750699043274,
574
- "rewards/rejected": -0.025248277932405472,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
- "logits/chosen": -2.0516254901885986,
581
- "logits/rejected": -2.045139789581299,
582
- "logps/chosen": -33.33611297607422,
583
- "logps/rejected": -29.27042579650879,
584
- "loss": 0.6307,
585
- "rewards/accuracies": 0.737500011920929,
586
- "rewards/chosen": 0.12634168565273285,
587
- "rewards/margins": 0.14394234120845795,
588
- "rewards/rejected": -0.01760067418217659,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
- "logits/chosen": -1.9114952087402344,
595
- "logits/rejected": -1.9137216806411743,
596
- "logps/chosen": -33.884849548339844,
597
- "logps/rejected": -30.985692977905273,
598
- "loss": 0.6144,
599
- "rewards/accuracies": 0.6875,
600
- "rewards/chosen": 0.14167849719524384,
601
- "rewards/margins": 0.18690630793571472,
602
- "rewards/rejected": -0.04522782564163208,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
- "train_loss": 0.6506414413452148,
610
- "train_runtime": 3254.0644,
611
- "train_samples_per_second": 0.946,
612
  "train_steps_per_second": 0.118
613
  }
614
  ],
 
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
+ "loss": 0.5,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
 
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
+ "logits/chosen": -1.8668408393859863,
29
+ "logits/rejected": -1.8711602687835693,
30
+ "logps/chosen": -36.98978042602539,
31
+ "logps/rejected": -33.66878890991211,
32
+ "loss": 0.4962,
33
+ "rewards/accuracies": 0.5416666865348816,
34
+ "rewards/chosen": 0.006728413049131632,
35
+ "rewards/margins": 0.022216208279132843,
36
+ "rewards/rejected": -0.015487794764339924,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
+ "logits/chosen": -1.9970359802246094,
43
+ "logits/rejected": -1.9996885061264038,
44
+ "logps/chosen": -29.635208129882812,
45
+ "logps/rejected": -29.063350677490234,
46
+ "loss": 0.4994,
47
+ "rewards/accuracies": 0.5249999761581421,
48
+ "rewards/chosen": 0.002794977743178606,
49
+ "rewards/margins": 0.002757500857114792,
50
+ "rewards/rejected": 3.747665323317051e-05,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
+ "logits/chosen": -1.9208602905273438,
57
+ "logits/rejected": -1.9181534051895142,
58
+ "logps/chosen": -31.40317726135254,
59
+ "logps/rejected": -33.23335647583008,
60
+ "loss": 0.4982,
61
+ "rewards/accuracies": 0.550000011920929,
62
+ "rewards/chosen": 0.005168457515537739,
63
+ "rewards/margins": 0.011126698926091194,
64
+ "rewards/rejected": -0.005958239547908306,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
+ "logits/chosen": -2.017474889755249,
71
+ "logits/rejected": -2.008759021759033,
72
+ "logps/chosen": -32.54490661621094,
73
+ "logps/rejected": -32.49110794067383,
74
+ "loss": 0.4994,
75
+ "rewards/accuracies": 0.4749999940395355,
76
+ "rewards/chosen": 0.012759355828166008,
77
+ "rewards/margins": 0.004133358132094145,
78
+ "rewards/rejected": 0.008625999093055725,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
+ "logits/chosen": -1.8631575107574463,
85
+ "logits/rejected": -1.8523809909820557,
86
+ "logps/chosen": -33.509056091308594,
87
+ "logps/rejected": -35.39984893798828,
88
+ "loss": 0.5001,
89
+ "rewards/accuracies": 0.48750001192092896,
90
+ "rewards/chosen": 0.01936880685389042,
91
+ "rewards/margins": -0.001629653968848288,
92
+ "rewards/rejected": 0.020998459309339523,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
+ "logits/chosen": -1.9419567584991455,
99
+ "logits/rejected": -1.9438903331756592,
100
+ "logps/chosen": -32.50743103027344,
101
+ "logps/rejected": -33.188419342041016,
102
+ "loss": 0.491,
103
+ "rewards/accuracies": 0.6000000238418579,
104
+ "rewards/chosen": 0.03710051625967026,
105
+ "rewards/margins": 0.04391607269644737,
106
+ "rewards/rejected": -0.006815555039793253,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
+ "logits/chosen": -2.0737738609313965,
113
+ "logits/rejected": -2.0787465572357178,
114
+ "logps/chosen": -33.928836822509766,
115
+ "logps/rejected": -36.540794372558594,
116
+ "loss": 0.4971,
117
+ "rewards/accuracies": 0.512499988079071,
118
+ "rewards/chosen": 0.023859737440943718,
119
+ "rewards/margins": 0.01262708194553852,
120
+ "rewards/rejected": 0.01123266015201807,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
+ "logits/chosen": -1.934011697769165,
127
+ "logits/rejected": -1.9371341466903687,
128
+ "logps/chosen": -34.23366928100586,
129
+ "logps/rejected": -34.542274475097656,
130
+ "loss": 0.4864,
131
+ "rewards/accuracies": 0.6000000238418579,
132
+ "rewards/chosen": 0.07765541225671768,
133
+ "rewards/margins": 0.057602256536483765,
134
+ "rewards/rejected": 0.020053153857588768,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
+ "logits/chosen": -1.9421571493148804,
141
+ "logits/rejected": -1.9466804265975952,
142
+ "logps/chosen": -32.304595947265625,
143
+ "logps/rejected": -32.284873962402344,
144
+ "loss": 0.4909,
145
+ "rewards/accuracies": 0.6499999761581421,
146
+ "rewards/chosen": 0.0651295855641365,
147
+ "rewards/margins": 0.035516757518053055,
148
+ "rewards/rejected": 0.02961282804608345,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
+ "logits/chosen": -2.0406017303466797,
155
+ "logits/rejected": -2.038613796234131,
156
+ "logps/chosen": -32.027652740478516,
157
+ "logps/rejected": -31.224151611328125,
158
+ "loss": 0.4845,
159
+ "rewards/accuracies": 0.637499988079071,
160
+ "rewards/chosen": 0.08527366816997528,
161
+ "rewards/margins": 0.0684497207403183,
162
+ "rewards/rejected": 0.01682395115494728,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
+ "eval_logits/chosen": -2.234653949737549,
168
+ "eval_logits/rejected": -2.229806423187256,
169
+ "eval_logps/chosen": -33.91841125488281,
170
+ "eval_logps/rejected": -37.42335891723633,
171
+ "eval_loss": 0.4981803297996521,
172
+ "eval_rewards/accuracies": 0.5510797500610352,
173
+ "eval_rewards/chosen": 0.04645563289523125,
174
+ "eval_rewards/margins": 0.009152057580649853,
175
+ "eval_rewards/rejected": 0.03730357065796852,
176
+ "eval_runtime": 145.8684,
177
+ "eval_samples_per_second": 2.351,
178
+ "eval_steps_per_second": 0.295,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
+ "logits/chosen": -1.9949238300323486,
185
+ "logits/rejected": -1.9925572872161865,
186
+ "logps/chosen": -32.99268341064453,
187
+ "logps/rejected": -33.90182113647461,
188
+ "loss": 0.4869,
189
+ "rewards/accuracies": 0.5874999761581421,
190
+ "rewards/chosen": 0.10045422613620758,
191
+ "rewards/margins": 0.049433451145887375,
192
+ "rewards/rejected": 0.051020748913288116,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
+ "logits/chosen": -2.0055007934570312,
199
+ "logits/rejected": -1.997157335281372,
200
+ "logps/chosen": -32.192996978759766,
201
+ "logps/rejected": -32.01173400878906,
202
+ "loss": 0.491,
203
+ "rewards/accuracies": 0.574999988079071,
204
+ "rewards/chosen": 0.10076460987329483,
205
+ "rewards/margins": 0.04085635766386986,
206
+ "rewards/rejected": 0.05990824103355408,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
+ "logits/chosen": -2.034147262573242,
213
+ "logits/rejected": -2.026184558868408,
214
+ "logps/chosen": -30.194936752319336,
215
+ "logps/rejected": -31.905689239501953,
216
+ "loss": 0.4876,
217
+ "rewards/accuracies": 0.574999988079071,
218
+ "rewards/chosen": 0.11231068521738052,
219
+ "rewards/margins": 0.05338172987103462,
220
+ "rewards/rejected": 0.0589289590716362,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
+ "logits/chosen": -1.9643388986587524,
227
+ "logits/rejected": -1.9745395183563232,
228
+ "logps/chosen": -31.08599853515625,
229
+ "logps/rejected": -32.422943115234375,
230
+ "loss": 0.4795,
231
+ "rewards/accuracies": 0.6625000238418579,
232
+ "rewards/chosen": 0.13346607983112335,
233
+ "rewards/margins": 0.08869143575429916,
234
+ "rewards/rejected": 0.044774629175662994,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
+ "logits/chosen": -1.8776963949203491,
241
+ "logits/rejected": -1.8788686990737915,
242
+ "logps/chosen": -33.690345764160156,
243
+ "logps/rejected": -34.572776794433594,
244
+ "loss": 0.4699,
245
+ "rewards/accuracies": 0.574999988079071,
246
+ "rewards/chosen": 0.20093801617622375,
247
+ "rewards/margins": 0.12714678049087524,
248
+ "rewards/rejected": 0.07379122078418732,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
+ "logits/chosen": -1.9286582469940186,
255
+ "logits/rejected": -1.9253017902374268,
256
+ "logps/chosen": -35.773475646972656,
257
+ "logps/rejected": -32.47566604614258,
258
+ "loss": 0.4851,
259
+ "rewards/accuracies": 0.625,
260
+ "rewards/chosen": 0.15928010642528534,
261
+ "rewards/margins": 0.06044477969408035,
262
+ "rewards/rejected": 0.09883531928062439,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
+ "logits/chosen": -2.0298352241516113,
269
+ "logits/rejected": -2.0225348472595215,
270
+ "logps/chosen": -33.22509002685547,
271
+ "logps/rejected": -31.19403648376465,
272
+ "loss": 0.4603,
273
+ "rewards/accuracies": 0.675000011920929,
274
+ "rewards/chosen": 0.22910937666893005,
275
+ "rewards/margins": 0.165984109044075,
276
+ "rewards/rejected": 0.06312531232833862,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
+ "logits/chosen": -2.036612033843994,
283
+ "logits/rejected": -2.041813373565674,
284
+ "logps/chosen": -31.960119247436523,
285
+ "logps/rejected": -32.171165466308594,
286
+ "loss": 0.4734,
287
+ "rewards/accuracies": 0.6875,
288
+ "rewards/chosen": 0.23386716842651367,
289
+ "rewards/margins": 0.1082783117890358,
290
+ "rewards/rejected": 0.12558886408805847,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
+ "logits/chosen": -2.037198066711426,
297
+ "logits/rejected": -2.0344595909118652,
298
+ "logps/chosen": -31.026615142822266,
299
+ "logps/rejected": -31.082998275756836,
300
+ "loss": 0.4781,
301
+ "rewards/accuracies": 0.625,
302
+ "rewards/chosen": 0.184935063123703,
303
+ "rewards/margins": 0.09084881842136383,
304
+ "rewards/rejected": 0.09408621490001678,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
+ "logits/chosen": -1.908818006515503,
311
+ "logits/rejected": -1.913496732711792,
312
+ "logps/chosen": -31.075199127197266,
313
+ "logps/rejected": -32.616241455078125,
314
+ "loss": 0.466,
315
  "rewards/accuracies": 0.7250000238418579,
316
+ "rewards/chosen": 0.2124340832233429,
317
+ "rewards/margins": 0.14255891740322113,
318
+ "rewards/rejected": 0.06987515836954117,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
+ "eval_logits/chosen": -2.23366379737854,
324
+ "eval_logits/rejected": -2.2288384437561035,
325
+ "eval_logps/chosen": -33.779903411865234,
326
+ "eval_logps/rejected": -37.29892349243164,
327
+ "eval_loss": 0.4965229630470276,
328
+ "eval_rewards/accuracies": 0.545265793800354,
329
+ "eval_rewards/chosen": 0.10185908526182175,
330
+ "eval_rewards/margins": 0.014780867844820023,
331
+ "eval_rewards/rejected": 0.08707821369171143,
332
+ "eval_runtime": 145.8117,
333
+ "eval_samples_per_second": 2.352,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
+ "logits/chosen": -2.0208163261413574,
341
+ "logits/rejected": -2.0314111709594727,
342
+ "logps/chosen": -31.514019012451172,
343
+ "logps/rejected": -33.690643310546875,
344
+ "loss": 0.4728,
345
+ "rewards/accuracies": 0.6000000238418579,
346
+ "rewards/chosen": 0.18291443586349487,
347
+ "rewards/margins": 0.114091657102108,
348
+ "rewards/rejected": 0.06882277131080627,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
+ "logits/chosen": -1.9141031503677368,
355
+ "logits/rejected": -1.9288082122802734,
356
+ "logps/chosen": -29.5712947845459,
357
+ "logps/rejected": -31.429983139038086,
358
+ "loss": 0.4615,
359
  "rewards/accuracies": 0.699999988079071,
360
+ "rewards/chosen": 0.2160976380109787,
361
+ "rewards/margins": 0.16482076048851013,
362
+ "rewards/rejected": 0.05127686262130737,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
+ "logits/chosen": -1.9712814092636108,
369
+ "logits/rejected": -1.975285291671753,
370
+ "logps/chosen": -32.82429885864258,
371
+ "logps/rejected": -31.416866302490234,
372
+ "loss": 0.4561,
373
+ "rewards/accuracies": 0.6625000238418579,
374
+ "rewards/chosen": 0.23726816475391388,
375
+ "rewards/margins": 0.18429425358772278,
376
+ "rewards/rejected": 0.052973903715610504,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
+ "logits/chosen": -1.969780683517456,
383
+ "logits/rejected": -1.9480432271957397,
384
+ "logps/chosen": -33.583518981933594,
385
+ "logps/rejected": -34.8461799621582,
386
+ "loss": 0.4567,
387
+ "rewards/accuracies": 0.675000011920929,
388
+ "rewards/chosen": 0.22809162735939026,
389
+ "rewards/margins": 0.18624703586101532,
390
+ "rewards/rejected": 0.04184458404779434,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
+ "logits/chosen": -2.0109124183654785,
397
+ "logits/rejected": -2.0076212882995605,
398
+ "logps/chosen": -32.46331024169922,
399
+ "logps/rejected": -35.97381591796875,
400
+ "loss": 0.4799,
401
+ "rewards/accuracies": 0.5625,
402
+ "rewards/chosen": 0.18470348417758942,
403
+ "rewards/margins": 0.08426074683666229,
404
+ "rewards/rejected": 0.10044274479150772,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
+ "logits/chosen": -1.878321647644043,
411
+ "logits/rejected": -1.8758872747421265,
412
+ "logps/chosen": -33.721397399902344,
413
+ "logps/rejected": -35.270362854003906,
414
+ "loss": 0.4785,
415
+ "rewards/accuracies": 0.6000000238418579,
416
+ "rewards/chosen": 0.1868000328540802,
417
+ "rewards/margins": 0.09309660643339157,
418
+ "rewards/rejected": 0.09370341151952744,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
+ "logits/chosen": -1.8631956577301025,
425
+ "logits/rejected": -1.8606828451156616,
426
+ "logps/chosen": -33.90094757080078,
427
+ "logps/rejected": -31.57466697692871,
428
+ "loss": 0.4722,
429
+ "rewards/accuracies": 0.637499988079071,
430
+ "rewards/chosen": 0.198659747838974,
431
+ "rewards/margins": 0.11999478191137314,
432
+ "rewards/rejected": 0.07866497337818146,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
+ "logits/chosen": -1.9663877487182617,
439
+ "logits/rejected": -1.9559704065322876,
440
+ "logps/chosen": -34.72657775878906,
441
+ "logps/rejected": -31.63601303100586,
442
+ "loss": 0.4585,
443
+ "rewards/accuracies": 0.6499999761581421,
444
+ "rewards/chosen": 0.25202488899230957,
445
+ "rewards/margins": 0.17003390192985535,
446
+ "rewards/rejected": 0.08199100196361542,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
+ "logits/chosen": -2.062107801437378,
453
+ "logits/rejected": -2.0472733974456787,
454
+ "logps/chosen": -30.40212059020996,
455
+ "logps/rejected": -32.340721130371094,
456
+ "loss": 0.4799,
457
  "rewards/accuracies": 0.625,
458
+ "rewards/chosen": 0.2088027447462082,
459
+ "rewards/margins": 0.08435753732919693,
460
+ "rewards/rejected": 0.12444518506526947,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
+ "logits/chosen": -1.9330482482910156,
467
+ "logits/rejected": -1.9305979013442993,
468
+ "logps/chosen": -32.06965255737305,
469
+ "logps/rejected": -30.65035629272461,
470
+ "loss": 0.4349,
471
+ "rewards/accuracies": 0.6625000238418579,
472
+ "rewards/chosen": 0.3429831862449646,
473
+ "rewards/margins": 0.27640262246131897,
474
+ "rewards/rejected": 0.06658058613538742,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
+ "eval_logits/chosen": -2.2318336963653564,
480
+ "eval_logits/rejected": -2.2270052433013916,
481
+ "eval_logps/chosen": -33.749427795410156,
482
+ "eval_logps/rejected": -37.27041244506836,
483
+ "eval_loss": 0.4961945414543152,
484
+ "eval_rewards/accuracies": 0.5357142686843872,
485
+ "eval_rewards/chosen": 0.11405016481876373,
486
+ "eval_rewards/margins": 0.015567691065371037,
487
+ "eval_rewards/rejected": 0.09848246723413467,
488
+ "eval_runtime": 145.5479,
489
+ "eval_samples_per_second": 2.357,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
+ "logits/chosen": -1.9179494380950928,
497
+ "logits/rejected": -1.9148216247558594,
498
+ "logps/chosen": -31.037755966186523,
499
+ "logps/rejected": -33.56406021118164,
500
+ "loss": 0.4639,
501
+ "rewards/accuracies": 0.762499988079071,
502
+ "rewards/chosen": 0.22623713314533234,
503
+ "rewards/margins": 0.15500028431415558,
504
+ "rewards/rejected": 0.07123686373233795,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
+ "logits/chosen": -1.9682966470718384,
511
+ "logits/rejected": -1.9561439752578735,
512
+ "logps/chosen": -34.023921966552734,
513
+ "logps/rejected": -33.415985107421875,
514
+ "loss": 0.4575,
515
+ "rewards/accuracies": 0.6875,
516
+ "rewards/chosen": 0.22886168956756592,
517
+ "rewards/margins": 0.17971986532211304,
518
+ "rewards/rejected": 0.04914180561900139,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
+ "logits/chosen": -2.003788709640503,
525
+ "logits/rejected": -2.002437114715576,
526
+ "logps/chosen": -32.8862419128418,
527
+ "logps/rejected": -32.21629333496094,
528
+ "loss": 0.4679,
529
+ "rewards/accuracies": 0.675000011920929,
530
+ "rewards/chosen": 0.23982055485248566,
531
+ "rewards/margins": 0.13450448215007782,
532
+ "rewards/rejected": 0.10531606525182724,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
+ "logits/chosen": -2.0905163288116455,
539
+ "logits/rejected": -2.0749027729034424,
540
+ "logps/chosen": -33.47309112548828,
541
+ "logps/rejected": -32.81334686279297,
542
+ "loss": 0.4618,
543
+ "rewards/accuracies": 0.675000011920929,
544
+ "rewards/chosen": 0.2825208008289337,
545
+ "rewards/margins": 0.16064420342445374,
546
+ "rewards/rejected": 0.12187659740447998,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
+ "logits/chosen": -1.9627025127410889,
553
+ "logits/rejected": -1.9618685245513916,
554
+ "logps/chosen": -32.528175354003906,
555
+ "logps/rejected": -32.22235107421875,
556
+ "loss": 0.4522,
557
+ "rewards/accuracies": 0.637499988079071,
558
+ "rewards/chosen": 0.30156710743904114,
559
+ "rewards/margins": 0.20444798469543457,
560
+ "rewards/rejected": 0.09711913019418716,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
+ "logits/chosen": -1.918320655822754,
567
+ "logits/rejected": -1.9285932779312134,
568
+ "logps/chosen": -31.606945037841797,
569
+ "logps/rejected": -34.98893356323242,
570
+ "loss": 0.4718,
571
+ "rewards/accuracies": 0.625,
572
+ "rewards/chosen": 0.2390974760055542,
573
+ "rewards/margins": 0.11916828155517578,
574
+ "rewards/rejected": 0.11992917954921722,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
+ "logits/chosen": -2.0581445693969727,
581
+ "logits/rejected": -2.051628828048706,
582
+ "logps/chosen": -33.017967224121094,
583
+ "logps/rejected": -28.989696502685547,
584
+ "loss": 0.4612,
585
+ "rewards/accuracies": 0.7749999761581421,
586
+ "rewards/chosen": 0.253601610660553,
587
+ "rewards/margins": 0.15891048312187195,
588
+ "rewards/rejected": 0.09469114243984222,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
+ "logits/chosen": -1.9176830053329468,
595
+ "logits/rejected": -1.919847846031189,
596
+ "logps/chosen": -33.614540100097656,
597
+ "logps/rejected": -30.760555267333984,
598
+ "loss": 0.4519,
599
+ "rewards/accuracies": 0.7875000238418579,
600
+ "rewards/chosen": 0.24980488419532776,
601
+ "rewards/margins": 0.2049761563539505,
602
+ "rewards/rejected": 0.04482869431376457,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
+ "train_loss": 0.4751699732495593,
610
+ "train_runtime": 3253.0203,
611
+ "train_samples_per_second": 0.947,
612
  "train_steps_per_second": 0.118
613
  }
614
  ],
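
For readers scanning the rewards/* fields in the trainer_state.json entries above: in a TRL-style DPO setup these metrics are typically derived from policy and reference log-probabilities scaled by the DPO beta. Neither the beta value nor the exact loss variant is recorded in this commit, so the sketch below is an illustrative assumption only, with placeholder values.

```python
# Hedged sketch of how rewards/* metrics like those logged above are commonly
# computed in a TRL-style DPO training loop. beta and the log-prob tensors are
# placeholders, not values taken from this run.
import torch

beta = 0.1  # assumed; the run's actual beta is not part of this commit

policy_chosen_logps = torch.tensor([-33.75])     # placeholder per-example log-probs
policy_rejected_logps = torch.tensor([-37.27])
ref_chosen_logps = torch.tensor([-34.10])
ref_rejected_logps = torch.tensor([-37.60])

rewards_chosen = beta * (policy_chosen_logps - ref_chosen_logps)        # "rewards/chosen"
rewards_rejected = beta * (policy_rejected_logps - ref_rejected_logps)  # "rewards/rejected"
rewards_margins = rewards_chosen - rewards_rejected                     # "rewards/margins"
rewards_accuracies = (rewards_chosen > rewards_rejected).float().mean() # "rewards/accuracies"
```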