hugodk-sch committed
Commit
d92c661
1 Parent(s): d6b266a

Model save

Files changed (5)
  1. README.md +13 -16
  2. adapter_model.safetensors +1 -1
  3. all_results.json +2 -15
  4. train_results.json +2 -2
  5. trainer_state.json +372 -372
README.md CHANGED
@@ -1,13 +1,10 @@
  ---
  library_name: peft
  tags:
- - alignment-handbook
  - trl
  - dpo
  - generated_from_trainer
  base_model: NbAiLab/nb-gpt-j-6B-v2
- datasets:
- - hugodk-sch/aftonposten_title_prefs
  model-index:
  - name: aftonposten-6b-align-scan
  results: []
@@ -18,17 +15,17 @@ should probably proofread and complete it, then remove this comment. -->

  # aftonposten-6b-align-scan

- This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
+ This model is a fine-tuned version of [NbAiLab/nb-gpt-j-6B-v2](https://huggingface.co/NbAiLab/nb-gpt-j-6B-v2) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.9781
- - Rewards/chosen: -0.0159
- - Rewards/rejected: -0.0383
- - Rewards/accuracies: 0.5170
- - Rewards/margins: 0.0224
- - Logps/rejected: -37.5644
- - Logps/chosen: -34.0544
- - Logits/rejected: -2.2184
- - Logits/chosen: -2.2232
+ - Loss: 0.6973
+ - Rewards/chosen: -0.0555
+ - Rewards/rejected: -0.0750
+ - Rewards/accuracies: 0.5390
+ - Rewards/margins: 0.0195
+ - Logps/rejected: -37.6000
+ - Logps/chosen: -34.0962
+ - Logits/rejected: -2.2243
+ - Logits/chosen: -2.2292

  ## Model description

@@ -63,9 +60,9 @@ The following hyperparameters were used during training:

  | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
  |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
- | 0.9007 | 0.26 | 100 | 0.9692 | 0.0124 | -0.0188 | 0.5752 | 0.0312 | -37.5401 | -34.0190 | -2.2262 | -2.2310 |
- | 0.7243 | 0.52 | 200 | 0.9691 | 0.0136 | -0.0181 | 0.5656 | 0.0317 | -37.5392 | -34.0175 | -2.2232 | -2.2280 |
- | 0.6515 | 0.78 | 300 | 0.9644 | -0.0061 | -0.0440 | 0.5602 | 0.0378 | -37.5716 | -34.0422 | -2.2186 | -2.2234 |
+ | 0.6464 | 0.26 | 100 | 0.6903 | -0.0054 | -0.0300 | 0.5685 | 0.0246 | -37.5500 | -34.0405 | -2.2291 | -2.2340 |
+ | 0.5931 | 0.52 | 200 | 0.6980 | -0.0346 | -0.0543 | 0.5158 | 0.0196 | -37.5769 | -34.0730 | -2.2267 | -2.2316 |
+ | 0.5301 | 0.78 | 300 | 0.6973 | -0.0555 | -0.0750 | 0.5390 | 0.0195 | -37.6000 | -34.0962 | -2.2243 | -2.2292 |


  ### Framework versions
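For context on the model card above: a minimal sketch, assuming the adapter is published as `hugodk-sch/aftonposten-6b-align-scan` (inferred from the commit author and model name, not stated in this diff), of loading it on top of the base model with `peft`. The adapter weights are the `adapter_model.safetensors` blob changed below.

```python
# Minimal loading sketch. Assumptions: the adapter repo id is
# "hugodk-sch/aftonposten-6b-align-scan"; the base model is the one named in the card.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "NbAiLab/nb-gpt-j-6B-v2"
adapter_id = "hugodk-sch/aftonposten-6b-align-scan"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id)

# Attach the PEFT adapter weights stored in adapter_model.safetensors.
model = PeftModel.from_pretrained(base_model, adapter_id)
model.eval()
```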
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:64cfd5abbbb41907ae32f148bf01549e450f38c8c806cac936b2152f636757da
+ oid sha256:955ddc6d94a67cda7538a1b702c976860689f0909cc722268fc8280507e6d239
  size 176183216
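The file above is a git-LFS pointer rather than the weights themselves; the `oid sha256:` line identifies the new adapter blob. A small sketch, assuming `adapter_model.safetensors` has been downloaded locally, of checking a download against that digest:

```python
# Sketch: verify a locally downloaded adapter_model.safetensors against the
# sha256 recorded in the LFS pointer (the new oid from this commit).
import hashlib

expected = "955ddc6d94a67cda7538a1b702c976860689f0909cc722268fc8280507e6d239"

h = hashlib.sha256()
with open("adapter_model.safetensors", "rb") as f:  # assumed local path
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == expected, "downloaded adapter does not match the LFS pointer"
```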
all_results.json CHANGED
@@ -1,20 +1,7 @@
  {
  "epoch": 1.0,
- "eval_logits/chosen": -2.2231976985931396,
- "eval_logits/rejected": -2.218374013900757,
- "eval_logps/chosen": -34.05440139770508,
- "eval_logps/rejected": -37.5644416809082,
- "eval_loss": 0.9781046509742737,
- "eval_rewards/accuracies": 0.5170266032218933,
- "eval_rewards/chosen": -0.01588033325970173,
- "eval_rewards/margins": 0.02237490564584732,
- "eval_rewards/rejected": -0.0382552333176136,
- "eval_runtime": 145.5341,
- "eval_samples": 343,
- "eval_samples_per_second": 2.357,
- "eval_steps_per_second": 0.295,
- "train_loss": 0.8298485310046704,
- "train_runtime": 3249.7236,
+ "train_loss": 0.6175476637753573,
+ "train_runtime": 3252.7839,
  "train_samples": 3079,
  "train_samples_per_second": 0.947,
  "train_steps_per_second": 0.118
train_results.json CHANGED
@@ -1,7 +1,7 @@
  {
  "epoch": 1.0,
- "train_loss": 0.8298485310046704,
- "train_runtime": 3249.7236,
+ "train_loss": 0.6175476637753573,
+ "train_runtime": 3252.7839,
  "train_samples": 3079,
  "train_samples_per_second": 0.947,
  "train_steps_per_second": 0.118
trainer_state.json CHANGED
@@ -15,7 +15,7 @@
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
- "loss": 1.0,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
@@ -25,155 +25,155 @@
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
- "logits/chosen": -1.8667426109313965,
29
- "logits/rejected": -1.8710602521896362,
30
- "logps/chosen": -36.991912841796875,
31
- "logps/rejected": -33.67206954956055,
32
- "loss": 0.9547,
33
- "rewards/accuracies": 0.5694444179534912,
34
- "rewards/chosen": 0.011750025674700737,
35
- "rewards/margins": 0.04534290358424187,
36
- "rewards/rejected": -0.03359287604689598,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
- "logits/chosen": -1.9977442026138306,
43
- "logits/rejected": -2.0003952980041504,
44
- "logps/chosen": -29.659366607666016,
45
- "logps/rejected": -29.05437660217285,
46
- "loss": 1.021,
47
- "rewards/accuracies": 0.4124999940395355,
48
- "rewards/chosen": -0.013736436143517494,
49
- "rewards/margins": -0.02098780684173107,
50
- "rewards/rejected": 0.007251373026520014,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
- "logits/chosen": -1.920693039894104,
57
- "logits/rejected": -1.91802179813385,
58
- "logps/chosen": -31.39971351623535,
59
- "logps/rejected": -33.21495819091797,
60
- "loss": 0.9897,
61
- "rewards/accuracies": 0.5625,
62
- "rewards/chosen": 0.013103686273097992,
63
- "rewards/margins": 0.010304747149348259,
64
- "rewards/rejected": 0.002798942383378744,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
- "logits/chosen": -2.018057107925415,
71
- "logits/rejected": -2.0093047618865967,
72
- "logps/chosen": -32.565284729003906,
73
- "logps/rejected": -32.50053405761719,
74
- "loss": 1.0005,
75
- "rewards/accuracies": 0.5375000238418579,
76
- "rewards/chosen": 0.009215259924530983,
77
- "rewards/margins": -0.0004936732584610581,
78
- "rewards/rejected": 0.009708933532238007,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
- "logits/chosen": -1.8627817630767822,
85
- "logits/rejected": -1.851999044418335,
86
- "logps/chosen": -33.549964904785156,
87
- "logps/rejected": -35.44340896606445,
88
- "loss": 1.0011,
89
- "rewards/accuracies": 0.4749999940395355,
90
- "rewards/chosen": 0.0060120681300759315,
91
- "rewards/margins": -0.0011377219343557954,
92
- "rewards/rejected": 0.007149793207645416,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
- "logits/chosen": -1.9416770935058594,
99
- "logits/rejected": -1.9436094760894775,
100
- "logps/chosen": -32.53351593017578,
101
- "logps/rejected": -33.217529296875,
102
- "loss": 0.9247,
103
- "rewards/accuracies": 0.5625,
104
- "rewards/chosen": 0.05333293229341507,
105
- "rewards/margins": 0.09025315940380096,
106
- "rewards/rejected": -0.036920223385095596,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
- "logits/chosen": -2.072779655456543,
113
- "logits/rejected": -2.077756881713867,
114
- "logps/chosen": -34.002342224121094,
115
- "logps/rejected": -36.633216857910156,
116
- "loss": 0.9596,
117
  "rewards/accuracies": 0.5249999761581421,
118
- "rewards/chosen": -0.011083832010626793,
119
- "rewards/margins": 0.040393490344285965,
120
- "rewards/rejected": -0.05147732421755791,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
- "logits/chosen": -1.9329026937484741,
127
- "logits/rejected": -1.936031699180603,
128
- "logps/chosen": -34.33915710449219,
129
- "logps/rejected": -34.65736770629883,
130
- "loss": 0.8892,
131
- "rewards/accuracies": 0.5625,
132
- "rewards/chosen": 0.07092130184173584,
133
- "rewards/margins": 0.12288935482501984,
134
- "rewards/rejected": -0.051968056708574295,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
- "logits/chosen": -1.9402366876602173,
141
- "logits/rejected": -1.9447383880615234,
142
- "logps/chosen": -32.383628845214844,
143
- "logps/rejected": -32.351661682128906,
144
- "loss": 0.9388,
145
- "rewards/accuracies": 0.574999988079071,
146
- "rewards/chosen": 0.06703362613916397,
147
- "rewards/margins": 0.06123671680688858,
148
- "rewards/rejected": 0.0057969121262431145,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
- "logits/chosen": -2.0364508628845215,
155
- "logits/rejected": -2.0344765186309814,
156
- "logps/chosen": -32.12981033325195,
157
- "logps/rejected": -31.279254913330078,
158
- "loss": 0.9007,
159
- "rewards/accuracies": 0.612500011920929,
160
- "rewards/chosen": 0.08881844580173492,
161
- "rewards/margins": 0.09925105422735214,
162
- "rewards/rejected": -0.010432596318423748,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
- "eval_logits/chosen": -2.231006622314453,
168
- "eval_logits/rejected": -2.2261619567871094,
169
- "eval_logps/chosen": -34.019046783447266,
170
- "eval_logps/rejected": -37.540069580078125,
171
- "eval_loss": 0.96916264295578,
172
- "eval_rewards/accuracies": 0.5751661062240601,
173
- "eval_rewards/chosen": 0.012404282577335835,
174
- "eval_rewards/margins": 0.031165316700935364,
175
- "eval_rewards/rejected": -0.018761036917567253,
176
- "eval_runtime": 146.0189,
177
  "eval_samples_per_second": 2.349,
178
  "eval_steps_per_second": 0.294,
179
  "step": 100
@@ -181,311 +181,311 @@
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
- "logits/chosen": -1.991776466369629,
185
- "logits/rejected": -1.9893957376480103,
186
- "logps/chosen": -33.122779846191406,
187
- "logps/rejected": -33.99274444580078,
188
- "loss": 0.9495,
189
- "rewards/accuracies": 0.6000000238418579,
190
- "rewards/chosen": 0.09682749211788177,
191
- "rewards/margins": 0.0675249844789505,
192
- "rewards/rejected": 0.029302507638931274,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
- "logits/chosen": -2.003622531890869,
199
- "logits/rejected": -1.9952924251556396,
200
- "logps/chosen": -32.31382369995117,
201
- "logps/rejected": -32.122901916503906,
202
- "loss": 0.9341,
203
- "rewards/accuracies": 0.5874999761581421,
204
- "rewards/chosen": 0.10486602783203125,
205
- "rewards/margins": 0.07398126274347305,
206
- "rewards/rejected": 0.030884763225913048,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
- "logits/chosen": -2.0311331748962402,
213
- "logits/rejected": -2.0231704711914062,
214
- "logps/chosen": -30.32816505432129,
215
- "logps/rejected": -32.052425384521484,
216
- "loss": 0.9159,
217
- "rewards/accuracies": 0.574999988079071,
218
- "rewards/chosen": 0.11803986132144928,
219
- "rewards/margins": 0.11757204681634903,
220
- "rewards/rejected": 0.0004678152617998421,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
- "logits/chosen": -1.9613821506500244,
227
- "logits/rejected": -1.9716154336929321,
228
- "logps/chosen": -31.22269630432129,
229
- "logps/rejected": -32.548851013183594,
230
- "loss": 0.8406,
231
- "rewards/accuracies": 0.6499999761581421,
232
- "rewards/chosen": 0.15757359564304352,
233
- "rewards/margins": 0.16875064373016357,
234
- "rewards/rejected": -0.01117704901844263,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
- "logits/chosen": -1.8720792531967163,
241
- "logits/rejected": -1.8732519149780273,
242
- "logps/chosen": -33.89937973022461,
243
- "logps/rejected": -34.796844482421875,
244
- "loss": 0.7907,
245
- "rewards/accuracies": 0.5874999761581421,
246
- "rewards/chosen": 0.23464930057525635,
247
- "rewards/margins": 0.2663186490535736,
248
- "rewards/rejected": -0.03166933357715607,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
- "logits/chosen": -1.923437476158142,
255
- "logits/rejected": -1.9200271368026733,
256
- "logps/chosen": -35.97971725463867,
257
- "logps/rejected": -32.6976203918457,
258
- "loss": 0.8704,
259
- "rewards/accuracies": 0.625,
260
- "rewards/chosen": 0.1535695493221283,
261
- "rewards/margins": 0.1334635317325592,
262
- "rewards/rejected": 0.02010601945221424,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
- "logits/chosen": -2.0249733924865723,
269
- "logits/rejected": -2.017641067504883,
270
- "logps/chosen": -33.441261291503906,
271
- "logps/rejected": -31.389623641967773,
272
- "loss": 0.7234,
273
- "rewards/accuracies": 0.7124999761581421,
274
- "rewards/chosen": 0.2852834165096283,
275
- "rewards/margins": 0.31550443172454834,
276
- "rewards/rejected": -0.03022097982466221,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
- "logits/chosen": -2.031703472137451,
283
- "logits/rejected": -2.036947727203369,
284
- "logps/chosen": -32.208961486816406,
285
- "logps/rejected": -32.41345977783203,
286
- "loss": 0.8097,
287
  "rewards/accuracies": 0.699999988079071,
288
- "rewards/chosen": 0.26866063475608826,
289
- "rewards/margins": 0.21131709218025208,
290
- "rewards/rejected": 0.05734356492757797,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
- "logits/chosen": -2.032174587249756,
297
- "logits/rejected": -2.029404401779175,
298
- "logps/chosen": -31.245046615600586,
299
- "logps/rejected": -31.299081802368164,
300
- "loss": 0.8381,
301
- "rewards/accuracies": 0.625,
302
- "rewards/chosen": 0.19512517750263214,
303
- "rewards/margins": 0.17981843650341034,
304
- "rewards/rejected": 0.015306718647480011,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
- "logits/chosen": -1.9025481939315796,
311
- "logits/rejected": -1.9071909189224243,
312
- "logps/chosen": -31.24801254272461,
313
- "logps/rejected": -32.809139251708984,
314
- "loss": 0.7243,
315
- "rewards/accuracies": 0.762499988079071,
316
- "rewards/chosen": 0.28661441802978516,
317
- "rewards/margins": 0.3011789917945862,
318
- "rewards/rejected": -0.014564569108188152,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
- "eval_logits/chosen": -2.227985382080078,
324
- "eval_logits/rejected": -2.2231621742248535,
325
- "eval_logps/chosen": -34.01750946044922,
326
- "eval_logps/rejected": -37.539207458496094,
327
- "eval_loss": 0.9690985083580017,
328
- "eval_rewards/accuracies": 0.565614640712738,
329
- "eval_rewards/chosen": 0.013635948300361633,
330
- "eval_rewards/margins": 0.03170585632324219,
331
- "eval_rewards/rejected": -0.018069909885525703,
332
- "eval_runtime": 145.4584,
333
- "eval_samples_per_second": 2.358,
334
- "eval_steps_per_second": 0.296,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
- "logits/chosen": -2.0139317512512207,
341
- "logits/rejected": -2.0245535373687744,
342
- "logps/chosen": -31.7435359954834,
343
- "logps/rejected": -33.9241828918457,
344
- "loss": 0.7947,
345
- "rewards/accuracies": 0.6499999761581421,
346
- "rewards/chosen": 0.1822136789560318,
347
- "rewards/margins": 0.23139998316764832,
348
- "rewards/rejected": -0.04918632656335831,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
- "logits/chosen": -1.9063589572906494,
355
- "logits/rejected": -1.9211170673370361,
356
- "logps/chosen": -29.79791831970215,
357
- "logps/rejected": -31.584829330444336,
358
- "loss": 0.7478,
359
  "rewards/accuracies": 0.762499988079071,
360
- "rewards/chosen": 0.2508983910083771,
361
- "rewards/margins": 0.2722209095954895,
362
- "rewards/rejected": -0.02132250741124153,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
- "logits/chosen": -1.9628435373306274,
369
- "logits/rejected": -1.9668251276016235,
370
- "logps/chosen": -33.099082946777344,
371
- "logps/rejected": -31.59071922302246,
372
- "loss": 0.7716,
373
- "rewards/accuracies": 0.675000011920929,
374
- "rewards/chosen": 0.25470608472824097,
375
- "rewards/margins": 0.28783971071243286,
376
- "rewards/rejected": -0.033133648335933685,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
- "logits/chosen": -1.9614289999008179,
383
- "logits/rejected": -1.939612627029419,
384
- "logps/chosen": -33.833091735839844,
385
- "logps/rejected": -35.08003234863281,
386
- "loss": 0.7052,
387
- "rewards/accuracies": 0.6875,
388
- "rewards/chosen": 0.25653010606765747,
389
- "rewards/margins": 0.3599211871623993,
390
- "rewards/rejected": -0.10339111089706421,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
- "logits/chosen": -2.0032362937927246,
397
- "logits/rejected": -1.999916434288025,
398
- "logps/chosen": -32.70505142211914,
399
- "logps/rejected": -36.224037170410156,
400
- "loss": 0.8429,
401
- "rewards/accuracies": 0.6499999761581421,
402
- "rewards/chosen": 0.17601460218429565,
403
- "rewards/margins": 0.17530463635921478,
404
- "rewards/rejected": 0.0007099613430909812,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
- "logits/chosen": -1.8701452016830444,
411
- "logits/rejected": -1.8677012920379639,
412
- "logps/chosen": -33.97339630126953,
413
- "logps/rejected": -35.50096130371094,
414
- "loss": 0.8408,
415
  "rewards/accuracies": 0.6875,
416
- "rewards/chosen": 0.17200371623039246,
417
- "rewards/margins": 0.16907431185245514,
418
- "rewards/rejected": 0.002929417882114649,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
- "logits/chosen": -1.8542945384979248,
425
- "logits/rejected": -1.851894736289978,
426
- "logps/chosen": -34.1937370300293,
427
- "logps/rejected": -31.8076114654541,
428
- "loss": 0.8124,
429
- "rewards/accuracies": 0.6625000238418579,
430
- "rewards/chosen": 0.1630934774875641,
431
- "rewards/margins": 0.1921185553073883,
432
- "rewards/rejected": -0.029025081545114517,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
- "logits/chosen": -1.9576352834701538,
439
- "logits/rejected": -1.9471737146377563,
440
- "logps/chosen": -34.961952209472656,
441
- "logps/rejected": -31.824676513671875,
442
- "loss": 0.7162,
443
- "rewards/accuracies": 0.75,
444
- "rewards/chosen": 0.31575411558151245,
445
- "rewards/margins": 0.30270126461982727,
446
- "rewards/rejected": 0.013052871450781822,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
- "logits/chosen": -2.052537202835083,
453
- "logits/rejected": -2.037666082382202,
454
- "logps/chosen": -30.679821014404297,
455
- "logps/rejected": -32.591346740722656,
456
- "loss": 0.8779,
457
- "rewards/accuracies": 0.6000000238418579,
458
- "rewards/chosen": 0.1954430788755417,
459
- "rewards/margins": 0.14705480635166168,
460
- "rewards/rejected": 0.048388272523880005,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
- "logits/chosen": -1.9234060049057007,
467
- "logits/rejected": -1.9208656549453735,
468
- "logps/chosen": -32.422889709472656,
469
- "logps/rejected": -30.887353897094727,
470
- "loss": 0.6515,
471
- "rewards/accuracies": 0.75,
472
- "rewards/chosen": 0.4033745229244232,
473
- "rewards/margins": 0.45980948209762573,
474
- "rewards/rejected": -0.056434907019138336,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
- "eval_logits/chosen": -2.2234299182891846,
480
- "eval_logits/rejected": -2.218611001968384,
481
- "eval_logps/chosen": -34.04222106933594,
482
- "eval_logps/rejected": -37.57155990600586,
483
- "eval_loss": 0.9644458889961243,
484
- "eval_rewards/accuracies": 0.560215950012207,
485
- "eval_rewards/chosen": -0.006133326329290867,
486
- "eval_rewards/margins": 0.03781980276107788,
487
- "eval_rewards/rejected": -0.04395313188433647,
488
- "eval_runtime": 145.7457,
489
  "eval_samples_per_second": 2.353,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
@@ -493,121 +493,121 @@
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
- "logits/chosen": -1.9086744785308838,
497
- "logits/rejected": -1.905432939529419,
498
- "logps/chosen": -31.29376220703125,
499
- "logps/rejected": -33.765647888183594,
500
- "loss": 0.7696,
501
- "rewards/accuracies": 0.7124999761581421,
502
- "rewards/chosen": 0.24766740202903748,
503
- "rewards/margins": 0.2664671242237091,
504
- "rewards/rejected": -0.01879967749118805,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
- "logits/chosen": -1.958169937133789,
511
- "logits/rejected": -1.9459987878799438,
512
- "logps/chosen": -34.305747985839844,
513
- "logps/rejected": -33.66083526611328,
514
- "loss": 0.7197,
515
- "rewards/accuracies": 0.6875,
516
- "rewards/chosen": 0.23226289451122284,
517
- "rewards/margins": 0.32985854148864746,
518
- "rewards/rejected": -0.0975956842303276,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
- "logits/chosen": -1.9929885864257812,
525
- "logits/rejected": -1.9915746450424194,
526
- "logps/chosen": -33.1512336730957,
527
- "logps/rejected": -32.53498077392578,
528
- "loss": 0.7279,
529
- "rewards/accuracies": 0.75,
530
- "rewards/chosen": 0.26764601469039917,
531
- "rewards/margins": 0.3119625747203827,
532
- "rewards/rejected": -0.04431656002998352,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
- "logits/chosen": -2.0791096687316895,
539
- "logits/rejected": -2.063427686691284,
540
- "logps/chosen": -33.73707580566406,
541
- "logps/rejected": -33.045745849609375,
542
- "loss": 0.7446,
543
- "rewards/accuracies": 0.6875,
544
- "rewards/chosen": 0.3538528084754944,
545
- "rewards/margins": 0.2960215210914612,
546
- "rewards/rejected": 0.05783123895525932,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
- "logits/chosen": -1.952497124671936,
553
- "logits/rejected": -1.951664686203003,
554
- "logps/chosen": -32.81846618652344,
555
- "logps/rejected": -32.528770446777344,
556
- "loss": 0.6539,
557
- "rewards/accuracies": 0.75,
558
- "rewards/chosen": 0.37090426683425903,
559
- "rewards/margins": 0.42179951071739197,
560
- "rewards/rejected": -0.05089529603719711,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
- "logits/chosen": -1.907370924949646,
567
- "logits/rejected": -1.9176517724990845,
568
- "logps/chosen": -31.86977195739746,
569
- "logps/rejected": -35.2895393371582,
570
- "loss": 0.7562,
571
  "rewards/accuracies": 0.675000011920929,
572
- "rewards/chosen": 0.2679324746131897,
573
- "rewards/margins": 0.26855653524398804,
574
- "rewards/rejected": -0.0006240725633688271,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
- "logits/chosen": -2.047156572341919,
581
- "logits/rejected": -2.0407111644744873,
582
- "logps/chosen": -33.288124084472656,
583
- "logps/rejected": -29.224395751953125,
584
- "loss": 0.7344,
585
- "rewards/accuracies": 0.699999988079071,
586
- "rewards/chosen": 0.2910745441913605,
587
- "rewards/margins": 0.28944963216781616,
588
- "rewards/rejected": 0.0016249760519713163,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
- "logits/chosen": -1.9066269397735596,
595
- "logits/rejected": -1.9088102579116821,
596
- "logps/chosen": -33.847068786621094,
597
- "logps/rejected": -30.924524307250977,
598
- "loss": 0.7185,
599
- "rewards/accuracies": 0.7250000238418579,
600
- "rewards/chosen": 0.31358352303504944,
601
- "rewards/margins": 0.35510215163230896,
602
- "rewards/rejected": -0.04151865094900131,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
- "train_loss": 0.8298485310046704,
610
- "train_runtime": 3249.7236,
611
  "train_samples_per_second": 0.947,
612
  "train_steps_per_second": 0.118
613
  }
 
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
+ "loss": 0.6931,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
 
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
+ "logits/chosen": -1.866413950920105,
29
+ "logits/rejected": -1.8707411289215088,
30
+ "logps/chosen": -36.98916244506836,
31
+ "logps/rejected": -33.67436981201172,
32
+ "loss": 0.6701,
33
+ "rewards/accuracies": 0.5416666865348816,
34
+ "rewards/chosen": 0.01569323241710663,
35
+ "rewards/margins": 0.05555717274546623,
36
+ "rewards/rejected": -0.039863936603069305,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
+ "logits/chosen": -1.9979650974273682,
43
+ "logits/rejected": -2.0006086826324463,
44
+ "logps/chosen": -29.624820709228516,
45
+ "logps/rejected": -29.0762939453125,
46
+ "loss": 0.6837,
47
+ "rewards/accuracies": 0.5874999761581421,
48
+ "rewards/chosen": 0.01563635841012001,
49
+ "rewards/margins": 0.027204299345612526,
50
+ "rewards/rejected": -0.01156794372946024,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
+ "logits/chosen": -1.921021819114685,
57
+ "logits/rejected": -1.9183374643325806,
58
+ "logps/chosen": -31.40532875061035,
59
+ "logps/rejected": -33.23241424560547,
60
+ "loss": 0.6877,
61
+ "rewards/accuracies": 0.5249999761581421,
62
+ "rewards/chosen": 0.00968973059207201,
63
+ "rewards/margins": 0.022251319140195847,
64
+ "rewards/rejected": -0.012561586685478687,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
+ "logits/chosen": -2.0176353454589844,
71
+ "logits/rejected": -2.008906364440918,
72
+ "logps/chosen": -32.574256896972656,
73
+ "logps/rejected": -32.53368377685547,
74
+ "loss": 0.6874,
75
+ "rewards/accuracies": 0.512499988079071,
76
+ "rewards/chosen": 0.0022967704571783543,
77
+ "rewards/margins": 0.02120940014719963,
78
+ "rewards/rejected": -0.018912632018327713,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
+ "logits/chosen": -1.8619186878204346,
85
+ "logits/rejected": -1.85114324092865,
86
+ "logps/chosen": -33.55537414550781,
87
+ "logps/rejected": -35.45675277709961,
88
+ "loss": 0.6957,
89
+ "rewards/accuracies": 0.48750001192092896,
90
+ "rewards/chosen": 0.001892436295747757,
91
+ "rewards/margins": 0.005858602002263069,
92
+ "rewards/rejected": -0.003966164775192738,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
+ "logits/chosen": -1.9400945901870728,
99
+ "logits/rejected": -1.9420464038848877,
100
+ "logps/chosen": -32.56509780883789,
101
+ "logps/rejected": -33.2406120300293,
102
+ "loss": 0.6632,
103
+ "rewards/accuracies": 0.6499999761581421,
104
+ "rewards/chosen": 0.031578924506902695,
105
+ "rewards/margins": 0.09388783574104309,
106
+ "rewards/rejected": -0.062308914959430695,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
+ "logits/chosen": -2.0712790489196777,
113
+ "logits/rejected": -2.0762436389923096,
114
+ "logps/chosen": -33.981910705566406,
115
+ "logps/rejected": -36.62363815307617,
116
+ "loss": 0.6833,
117
  "rewards/accuracies": 0.5249999761581421,
118
+ "rewards/chosen": 0.005918038543313742,
119
+ "rewards/margins": 0.05520814657211304,
120
+ "rewards/rejected": -0.04929010197520256,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
+ "logits/chosen": -1.9327905178070068,
127
+ "logits/rejected": -1.935909628868103,
128
+ "logps/chosen": -34.32685470581055,
129
+ "logps/rejected": -34.65606689453125,
130
+ "loss": 0.639,
131
+ "rewards/accuracies": 0.6625000238418579,
132
+ "rewards/chosen": 0.09085920453071594,
133
+ "rewards/margins": 0.14815348386764526,
134
+ "rewards/rejected": -0.057294271886348724,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
+ "logits/chosen": -1.9414918422698975,
141
+ "logits/rejected": -1.946007490158081,
142
+ "logps/chosen": -32.406803131103516,
143
+ "logps/rejected": -32.36021041870117,
144
+ "loss": 0.6792,
145
+ "rewards/accuracies": 0.6000000238418579,
146
+ "rewards/chosen": 0.054556868970394135,
147
+ "rewards/margins": 0.05573350936174393,
148
+ "rewards/rejected": -0.0011766403913497925,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
+ "logits/chosen": -2.039034128189087,
155
+ "logits/rejected": -2.0370402336120605,
156
+ "logps/chosen": -32.172786712646484,
157
+ "logps/rejected": -31.333194732666016,
158
+ "loss": 0.6464,
159
+ "rewards/accuracies": 0.625,
160
+ "rewards/chosen": 0.06124376505613327,
161
+ "rewards/margins": 0.12152798473834991,
162
+ "rewards/rejected": -0.06028420478105545,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
+ "eval_logits/chosen": -2.2339773178100586,
168
+ "eval_logits/rejected": -2.229137420654297,
169
+ "eval_logps/chosen": -34.04054641723633,
170
+ "eval_logps/rejected": -37.549957275390625,
171
+ "eval_loss": 0.6902773976325989,
172
+ "eval_rewards/accuracies": 0.5685215592384338,
173
+ "eval_rewards/chosen": -0.005393954925239086,
174
+ "eval_rewards/margins": 0.024608083069324493,
175
+ "eval_rewards/rejected": -0.030002037063241005,
176
+ "eval_runtime": 146.034,
177
  "eval_samples_per_second": 2.349,
178
  "eval_steps_per_second": 0.294,
179
  "step": 100
 
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
+ "logits/chosen": -1.994192123413086,
185
+ "logits/rejected": -1.9918158054351807,
186
+ "logps/chosen": -33.142940521240234,
187
+ "logps/rejected": -34.01188278198242,
188
+ "loss": 0.6911,
189
+ "rewards/accuracies": 0.5375000238418579,
190
+ "rewards/chosen": 0.09078876674175262,
191
+ "rewards/margins": 0.07505009323358536,
192
+ "rewards/rejected": 0.015738680958747864,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
+ "logits/chosen": -2.0053954124450684,
199
+ "logits/rejected": -1.997046709060669,
200
+ "logps/chosen": -32.33894348144531,
201
+ "logps/rejected": -32.1308708190918,
202
+ "loss": 0.6746,
203
+ "rewards/accuracies": 0.5625,
204
+ "rewards/chosen": 0.09536493569612503,
205
+ "rewards/margins": 0.06779730319976807,
206
+ "rewards/rejected": 0.027567636221647263,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
+ "logits/chosen": -2.0336387157440186,
213
+ "logits/rejected": -2.025650978088379,
214
+ "logps/chosen": -30.345691680908203,
215
+ "logps/rejected": -32.078697204589844,
216
+ "loss": 0.6527,
217
+ "rewards/accuracies": 0.612500011920929,
218
+ "rewards/chosen": 0.11702337116003036,
219
+ "rewards/margins": 0.14014457166194916,
220
+ "rewards/rejected": -0.023121213540434837,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
+ "logits/chosen": -1.9642337560653687,
227
+ "logits/rejected": -1.9744552373886108,
228
+ "logps/chosen": -31.243911743164062,
229
+ "logps/rejected": -32.590267181396484,
230
+ "loss": 0.6171,
231
+ "rewards/accuracies": 0.6625000238418579,
232
+ "rewards/chosen": 0.1581769436597824,
233
+ "rewards/margins": 0.20802685618400574,
234
+ "rewards/rejected": -0.04984992742538452,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
+ "logits/chosen": -1.876604437828064,
241
+ "logits/rejected": -1.8777605295181274,
242
+ "logps/chosen": -33.938690185546875,
243
+ "logps/rejected": -34.807891845703125,
244
+ "loss": 0.6043,
245
+ "rewards/accuracies": 0.612500011920929,
246
+ "rewards/chosen": 0.22860188782215118,
247
+ "rewards/margins": 0.2741745412349701,
248
+ "rewards/rejected": -0.0455726757645607,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
+ "logits/chosen": -1.9282041788101196,
255
+ "logits/rejected": -1.9247684478759766,
256
+ "logps/chosen": -36.02125930786133,
257
+ "logps/rejected": -32.71831130981445,
258
+ "loss": 0.6454,
259
+ "rewards/accuracies": 0.612500011920929,
260
+ "rewards/chosen": 0.13537634909152985,
261
+ "rewards/margins": 0.13137592375278473,
262
+ "rewards/rejected": 0.004000450484454632,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
+ "logits/chosen": -2.029125928878784,
269
+ "logits/rejected": -2.0217747688293457,
270
+ "logps/chosen": -33.49839401245117,
271
+ "logps/rejected": -31.400177001953125,
272
+ "loss": 0.5828,
273
+ "rewards/accuracies": 0.637499988079071,
274
+ "rewards/chosen": 0.26951926946640015,
275
+ "rewards/margins": 0.3130132555961609,
276
+ "rewards/rejected": -0.04349397122859955,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
+ "logits/chosen": -2.0355944633483887,
283
+ "logits/rejected": -2.040832042694092,
284
+ "logps/chosen": -32.235923767089844,
285
+ "logps/rejected": -32.460418701171875,
286
+ "loss": 0.5943,
287
  "rewards/accuracies": 0.699999988079071,
288
+ "rewards/chosen": 0.2779761850833893,
289
+ "rewards/margins": 0.2557251751422882,
290
+ "rewards/rejected": 0.02225096896290779,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
+ "logits/chosen": -2.0362112522125244,
297
+ "logits/rejected": -2.0334599018096924,
298
+ "logps/chosen": -31.269250869750977,
299
+ "logps/rejected": -31.325435638427734,
300
+ "loss": 0.6245,
301
+ "rewards/accuracies": 0.637499988079071,
302
+ "rewards/chosen": 0.19773444533348083,
303
+ "rewards/margins": 0.20423230528831482,
304
+ "rewards/rejected": -0.0064978525042533875,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
+ "logits/chosen": -1.9060389995574951,
311
+ "logits/rejected": -1.9106788635253906,
312
+ "logps/chosen": -31.306299209594727,
313
+ "logps/rejected": -32.81407165527344,
314
+ "loss": 0.5931,
315
+ "rewards/accuracies": 0.7124999761581421,
316
+ "rewards/chosen": 0.2699825167655945,
317
+ "rewards/margins": 0.2908058166503906,
318
+ "rewards/rejected": -0.02082330361008644,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
+ "eval_logits/chosen": -2.231553792953491,
324
+ "eval_logits/rejected": -2.2267112731933594,
325
+ "eval_logps/chosen": -34.07304763793945,
326
+ "eval_logps/rejected": -37.57693862915039,
327
+ "eval_loss": 0.6979728937149048,
328
+ "eval_rewards/accuracies": 0.5157807469367981,
329
+ "eval_rewards/chosen": -0.03464451804757118,
330
+ "eval_rewards/margins": 0.019641490653157234,
331
+ "eval_rewards/rejected": -0.054286014288663864,
332
+ "eval_runtime": 145.8095,
333
+ "eval_samples_per_second": 2.352,
334
+ "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
+ "logits/chosen": -2.018519163131714,
341
+ "logits/rejected": -2.0291810035705566,
342
+ "logps/chosen": -31.742992401123047,
343
+ "logps/rejected": -33.946937561035156,
344
+ "loss": 0.5902,
345
+ "rewards/accuracies": 0.675000011920929,
346
+ "rewards/chosen": 0.2054794579744339,
347
+ "rewards/margins": 0.2812942862510681,
348
+ "rewards/rejected": -0.07581482082605362,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
+ "logits/chosen": -1.911586046218872,
355
+ "logits/rejected": -1.9263393878936768,
356
+ "logps/chosen": -29.84616470336914,
357
+ "logps/rejected": -31.615009307861328,
358
+ "loss": 0.5879,
359
  "rewards/accuracies": 0.762499988079071,
360
+ "rewards/chosen": 0.23883743584156036,
361
+ "rewards/margins": 0.2899848222732544,
362
+ "rewards/rejected": -0.051147449761629105,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
+ "logits/chosen": -1.9677941799163818,
369
+ "logits/rejected": -1.9717823266983032,
370
+ "logps/chosen": -33.100074768066406,
371
+ "logps/rejected": -31.62213134765625,
372
+ "loss": 0.5748,
373
+ "rewards/accuracies": 0.737500011920929,
374
+ "rewards/chosen": 0.28565075993537903,
375
+ "rewards/margins": 0.3511958718299866,
376
+ "rewards/rejected": -0.06554517149925232,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
+ "logits/chosen": -1.9661725759506226,
383
+ "logits/rejected": -1.944300651550293,
384
+ "logps/chosen": -33.841453552246094,
385
+ "logps/rejected": -35.11375045776367,
386
+ "loss": 0.5473,
387
+ "rewards/accuracies": 0.7875000238418579,
388
+ "rewards/chosen": 0.2810631990432739,
389
+ "rewards/margins": 0.4277234673500061,
390
+ "rewards/rejected": -0.14666026830673218,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
+ "logits/chosen": -2.007416009902954,
397
+ "logits/rejected": -2.0040948390960693,
398
+ "logps/chosen": -32.70330810546875,
399
+ "logps/rejected": -36.29412841796875,
400
+ "loss": 0.5992,
401
+ "rewards/accuracies": 0.6875,
402
+ "rewards/chosen": 0.1995842456817627,
403
+ "rewards/margins": 0.2618715763092041,
404
+ "rewards/rejected": -0.06228730082511902,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
+ "logits/chosen": -1.8749721050262451,
411
+ "logits/rejected": -1.8725513219833374,
412
+ "logps/chosen": -34.00068664550781,
413
+ "logps/rejected": -35.53888702392578,
414
+ "loss": 0.6254,
415
  "rewards/accuracies": 0.6875,
416
+ "rewards/chosen": 0.16894161701202393,
417
+ "rewards/margins": 0.1997825801372528,
418
+ "rewards/rejected": -0.030840963125228882,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
+ "logits/chosen": -1.8600317239761353,
425
+ "logits/rejected": -1.8576066493988037,
426
+ "logps/chosen": -34.1875,
427
+ "logps/rejected": -31.8159122467041,
428
+ "loss": 0.616,
429
+ "rewards/accuracies": 0.7124999761581421,
430
+ "rewards/chosen": 0.1890900433063507,
431
+ "rewards/margins": 0.22921428084373474,
432
+ "rewards/rejected": -0.04012420028448105,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
+ "logits/chosen": -1.9631398916244507,
439
+ "logits/rejected": -1.9526073932647705,
440
+ "logps/chosen": -35.023719787597656,
441
+ "logps/rejected": -31.869693756103516,
442
+ "loss": 0.5782,
443
+ "rewards/accuracies": 0.699999988079071,
444
+ "rewards/chosen": 0.29963088035583496,
445
+ "rewards/margins": 0.32546472549438477,
446
+ "rewards/rejected": -0.025833839550614357,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
+ "logits/chosen": -2.0582926273345947,
453
+ "logits/rejected": -2.0433640480041504,
454
+ "logps/chosen": -30.733753204345703,
455
+ "logps/rejected": -32.67460632324219,
456
+ "loss": 0.6392,
457
+ "rewards/accuracies": 0.637499988079071,
458
+ "rewards/chosen": 0.17133468389511108,
459
+ "rewards/margins": 0.19182677567005157,
460
+ "rewards/rejected": -0.020492086187005043,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
+ "logits/chosen": -1.929610013961792,
467
+ "logits/rejected": -1.9270601272583008,
468
+ "logps/chosen": -32.42620086669922,
469
+ "logps/rejected": -30.873455047607422,
470
+ "loss": 0.5301,
471
+ "rewards/accuracies": 0.737500011920929,
472
+ "rewards/chosen": 0.450817346572876,
473
+ "rewards/margins": 0.5018006563186646,
474
+ "rewards/rejected": -0.050983332097530365,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
+ "eval_logits/chosen": -2.229154348373413,
480
+ "eval_logits/rejected": -2.2243051528930664,
481
+ "eval_logps/chosen": -34.09621810913086,
482
+ "eval_logps/rejected": -37.59999084472656,
483
+ "eval_loss": 0.6972895860671997,
484
+ "eval_rewards/accuracies": 0.5390365719795227,
485
+ "eval_rewards/chosen": -0.05550166219472885,
486
+ "eval_rewards/margins": 0.019528048112988472,
487
+ "eval_rewards/rejected": -0.07502970844507217,
488
+ "eval_runtime": 145.7792,
489
  "eval_samples_per_second": 2.353,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
 
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
+ "logits/chosen": -1.9142345190048218,
497
+ "logits/rejected": -1.9109809398651123,
498
+ "logps/chosen": -31.33791732788086,
499
+ "logps/rejected": -33.82014465332031,
500
+ "loss": 0.5861,
501
+ "rewards/accuracies": 0.75,
502
+ "rewards/chosen": 0.23888680338859558,
503
+ "rewards/margins": 0.30907896161079407,
504
+ "rewards/rejected": -0.07019217312335968,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
+ "logits/chosen": -1.9650068283081055,
511
+ "logits/rejected": -1.9527791738510132,
512
+ "logps/chosen": -34.34791946411133,
513
+ "logps/rejected": -33.650447845458984,
514
+ "loss": 0.5828,
515
+ "rewards/accuracies": 0.6499999761581421,
516
+ "rewards/chosen": 0.22334297001361847,
517
+ "rewards/margins": 0.3237887918949127,
518
+ "rewards/rejected": -0.10044582933187485,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
+ "logits/chosen": -2.00040602684021,
525
+ "logits/rejected": -1.9989902973175049,
526
+ "logps/chosen": -33.210105895996094,
527
+ "logps/rejected": -32.56142807006836,
528
+ "loss": 0.5803,
529
+ "rewards/accuracies": 0.699999988079071,
530
+ "rewards/chosen": 0.24812059104442596,
531
+ "rewards/margins": 0.32178014516830444,
532
+ "rewards/rejected": -0.07365953922271729,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
+ "logits/chosen": -2.0870866775512695,
539
+ "logits/rejected": -2.0713772773742676,
540
+ "logps/chosen": -33.80995178222656,
541
+ "logps/rejected": -33.120697021484375,
542
+ "loss": 0.5723,
543
+ "rewards/accuracies": 0.75,
544
+ "rewards/chosen": 0.3324963450431824,
545
+ "rewards/margins": 0.33488941192626953,
546
+ "rewards/rejected": -0.0023930787574499846,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
+ "logits/chosen": -1.959240198135376,
553
+ "logits/rejected": -1.9583876132965088,
554
+ "logps/chosen": -32.863216400146484,
555
+ "logps/rejected": -32.54397201538086,
556
+ "loss": 0.5523,
557
+ "rewards/accuracies": 0.7124999761581421,
558
+ "rewards/chosen": 0.37699171900749207,
559
+ "rewards/margins": 0.44793614745140076,
560
+ "rewards/rejected": -0.0709443911910057,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
+ "logits/chosen": -1.9147189855575562,
567
+ "logits/rejected": -1.9250224828720093,
568
+ "logps/chosen": -31.902795791625977,
569
+ "logps/rejected": -35.3552131652832,
570
+ "loss": 0.5743,
571
  "rewards/accuracies": 0.675000011920929,
572
+ "rewards/chosen": 0.2717086672782898,
573
+ "rewards/margins": 0.33151620626449585,
574
+ "rewards/rejected": -0.05980752781033516,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
+ "logits/chosen": -2.05413556098938,
581
+ "logits/rejected": -2.047651767730713,
582
+ "logps/chosen": -33.377376556396484,
583
+ "logps/rejected": -29.2799072265625,
584
+ "loss": 0.5801,
585
+ "rewards/accuracies": 0.7250000238418579,
586
+ "rewards/chosen": 0.24713313579559326,
587
+ "rewards/margins": 0.2952673137187958,
588
+ "rewards/rejected": -0.04813414067029953,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
+ "logits/chosen": -1.9141871929168701,
595
+ "logits/rejected": -1.9163949489593506,
596
+ "logps/chosen": -33.87698745727539,
597
+ "logps/rejected": -30.976858139038086,
598
+ "loss": 0.5489,
599
+ "rewards/accuracies": 0.699999988079071,
600
+ "rewards/chosen": 0.32585546374320984,
601
+ "rewards/margins": 0.4196627140045166,
602
+ "rewards/rejected": -0.09380728751420975,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
+ "train_loss": 0.6175476637753573,
610
+ "train_runtime": 3252.7839,
611
  "train_samples_per_second": 0.947,
612
  "train_steps_per_second": 0.118
613
  }
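The `rewards/*` entries logged throughout `trainer_state.json` follow the usual TRL DPO convention: each reward is beta times the gap between the policy and reference log-probabilities of a completion, the margin is chosen minus rejected, and accuracy is the fraction of pairs where the chosen reward is higher. A sketch of that relationship; beta and the log-prob tensors here are illustrative, since neither appears in this diff:

```python
# Sketch of how rewards/* metrics are typically derived in a TRL-style DPO run.
# beta and the log-prob tensors are placeholders, not values taken from this commit.
import torch

beta = 0.1  # assumed; the actual beta is set in the training config, which is not shown here

def dpo_reward_stats(policy_chosen_logps, policy_rejected_logps,
                     ref_chosen_logps, ref_rejected_logps):
    chosen_rewards = beta * (policy_chosen_logps - ref_chosen_logps)
    rejected_rewards = beta * (policy_rejected_logps - ref_rejected_logps)
    margins = chosen_rewards - rejected_rewards
    accuracies = (chosen_rewards > rejected_rewards).float()
    return {
        "rewards/chosen": chosen_rewards.mean().item(),
        "rewards/rejected": rejected_rewards.mean().item(),
        "rewards/margins": margins.mean().item(),
        "rewards/accuracies": accuracies.mean().item(),
    }
```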