hugodk-sch committed on
Commit 94a31fe
1 Parent(s): dfd9069

Model save

Files changed (5)
  1. README.md +13 -16
  2. adapter_model.safetensors +1 -1
  3. all_results.json +2 -15
  4. train_results.json +2 -2
  5. trainer_state.json +375 -375
README.md CHANGED
@@ -1,13 +1,10 @@
  ---
  library_name: peft
  tags:
- - alignment-handbook
  - trl
  - dpo
  - generated_from_trainer
  base_model: NbAiLab/nb-gpt-j-6B-v2
- datasets:
- - hugodk-sch/aftonposten_title_prefs
  model-index:
  - name: aftonposten-6b-align-scan
    results: []
@@ -18,17 +15,17 @@ should probably proofread and complete it, then remove this comment. -->
 
  # aftonposten-6b-align-scan
 
- This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
+ This model is a fine-tuned version of [NbAiLab/nb-gpt-j-6B-v2](https://huggingface.co/NbAiLab/nb-gpt-j-6B-v2) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.9715
- - Rewards/chosen: -0.0427
- - Rewards/rejected: -0.0715
- - Rewards/accuracies: 0.5365
- - Rewards/margins: 0.0289
- - Logps/rejected: -37.6359
- - Logps/chosen: -34.1056
- - Logits/rejected: -2.2191
- - Logits/chosen: -2.2239
+ - Loss: 0.6903
+ - Rewards/chosen: -0.0237
+ - Rewards/rejected: -0.0486
+ - Rewards/accuracies: 0.5191
+ - Rewards/margins: 0.0249
+ - Logps/rejected: -37.5860
+ - Logps/chosen: -34.0684
+ - Logits/rejected: -2.2229
+ - Logits/chosen: -2.2277
 
  ## Model description
 
@@ -63,9 +60,9 @@ The following hyperparameters were used during training:
 
  | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
  |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
- | 0.9204 | 0.26 | 100 | 0.9816 | -0.0008 | -0.0193 | 0.5395 | 0.0185 | -37.5487 | -34.0358 | -2.2268 | -2.2317 |
- | 0.8045 | 0.52 | 200 | 0.9791 | -0.0166 | -0.0378 | 0.5361 | 0.0211 | -37.5796 | -34.0623 | -2.2238 | -2.2287 |
- | 0.7186 | 0.78 | 300 | 0.9732 | -0.0459 | -0.0732 | 0.5071 | 0.0273 | -37.6386 | -34.1110 | -2.2194 | -2.2243 |
+ | 0.6577 | 0.26 | 100 | 0.6949 | -0.0038 | -0.0133 | 0.5316 | 0.0095 | -37.5357 | -34.0400 | -2.2289 | -2.2338 |
+ | 0.6156 | 0.52 | 200 | 0.6943 | -0.0132 | -0.0288 | 0.5191 | 0.0156 | -37.5578 | -34.0535 | -2.2268 | -2.2317 |
+ | 0.5468 | 0.78 | 300 | 0.6903 | -0.0237 | -0.0486 | 0.5191 | 0.0249 | -37.5860 | -34.0684 | -2.2229 | -2.2277 |
 
 
  ### Framework versions
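Since the updated card keeps `library_name: peft` with base model NbAiLab/nb-gpt-j-6B-v2, and this commit replaces adapter_model.safetensors, the saved weights are presumably a PEFT adapter to be applied on top of the base model. Below is a minimal sketch, assuming the repository id is `hugodk-sch/aftonposten-6b-align-scan` (inferred from the author and model name on this page) and that the stock transformers/peft loading APIs are used:

```python
# Sketch: load the base model and apply this PEFT adapter.
# "hugodk-sch/aftonposten-6b-align-scan" is an assumed repo id, not confirmed by the diff.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "NbAiLab/nb-gpt-j-6B-v2"                    # base_model from the card metadata
adapter_id = "hugodk-sch/aftonposten-6b-align-scan"   # assumed adapter repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.float16)
model = PeftModel.from_pretrained(base, adapter_id)   # loads adapter_model.safetensors

inputs = tokenizer("Oslo: ", return_tensors="pt")
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=20)[0]))
```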
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9bd332dec561f01d2bca2a6e7821277394bd0e424cf6dba7b5419a5e722eefb2
+ oid sha256:47298c65412662b4ec4aa64bcd24a4e16a9a56108e7e9e1f87d87e4b59545e2b
  size 176183216
all_results.json CHANGED
@@ -1,20 +1,7 @@
  {
  "epoch": 1.0,
- "eval_logits/chosen": -2.223938465118408,
- "eval_logits/rejected": -2.2191123962402344,
- "eval_logps/chosen": -34.10564041137695,
- "eval_logps/rejected": -37.63585662841797,
- "eval_loss": 0.9714530110359192,
- "eval_rewards/accuracies": 0.5365448594093323,
- "eval_rewards/chosen": -0.042656492441892624,
- "eval_rewards/margins": 0.028885547071695328,
- "eval_rewards/rejected": -0.07154203206300735,
- "eval_runtime": 145.6066,
- "eval_samples": 343,
- "eval_samples_per_second": 2.356,
- "eval_steps_per_second": 0.295,
- "train_loss": 0.859264765157328,
- "train_runtime": 3251.7628,
+ "train_loss": 0.6291831790626823,
+ "train_runtime": 3251.1508,
  "train_samples": 3079,
  "train_samples_per_second": 0.947,
  "train_steps_per_second": 0.118
train_results.json CHANGED
@@ -1,7 +1,7 @@
  {
  "epoch": 1.0,
- "train_loss": 0.859264765157328,
- "train_runtime": 3251.7628,
+ "train_loss": 0.6291831790626823,
+ "train_runtime": 3251.1508,
  "train_samples": 3079,
  "train_samples_per_second": 0.947,
  "train_steps_per_second": 0.118
trainer_state.json CHANGED
@@ -15,7 +15,7 @@
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
- "loss": 1.0,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
@@ -25,589 +25,589 @@
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
- "logits/chosen": -1.866829752922058,
29
- "logits/rejected": -1.8711390495300293,
30
- "logps/chosen": -36.97040557861328,
31
- "logps/rejected": -33.66280746459961,
32
- "loss": 0.9586,
33
- "rewards/accuracies": 0.5972222089767456,
34
- "rewards/chosen": 0.021715592592954636,
35
- "rewards/margins": 0.041353289037942886,
36
- "rewards/rejected": -0.01963769644498825,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
- "logits/chosen": -1.9976301193237305,
43
- "logits/rejected": -2.0002708435058594,
44
- "logps/chosen": -29.64252281188965,
45
- "logps/rejected": -29.05857276916504,
46
- "loss": 1.0031,
47
- "rewards/accuracies": 0.4749999940395355,
48
- "rewards/chosen": -0.00019791116937994957,
49
- "rewards/margins": -0.003119796048849821,
50
- "rewards/rejected": 0.0029218837153166533,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
- "logits/chosen": -1.9210872650146484,
57
- "logits/rejected": -1.918402910232544,
58
- "logps/chosen": -31.397838592529297,
59
- "logps/rejected": -33.22719192504883,
60
- "loss": 0.9838,
61
- "rewards/accuracies": 0.612500011920929,
62
- "rewards/chosen": 0.010952453128993511,
63
- "rewards/margins": 0.01619068905711174,
64
- "rewards/rejected": -0.005238235928118229,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
- "logits/chosen": -2.017606258392334,
71
- "logits/rejected": -2.0088634490966797,
72
- "logps/chosen": -32.566612243652344,
73
- "logps/rejected": -32.52539825439453,
74
- "loss": 0.9862,
75
- "rewards/accuracies": 0.5375000238418579,
76
- "rewards/chosen": 0.006120447069406509,
77
- "rewards/margins": 0.013754432089626789,
78
- "rewards/rejected": -0.007633985485881567,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
- "logits/chosen": -1.8625805377960205,
85
- "logits/rejected": -1.8518139123916626,
86
- "logps/chosen": -33.56818771362305,
87
- "logps/rejected": -35.4713020324707,
88
- "loss": 0.9951,
89
- "rewards/accuracies": 0.550000011920929,
90
- "rewards/chosen": -0.0064262161031365395,
91
- "rewards/margins": 0.004948456306010485,
92
- "rewards/rejected": -0.011374671943485737,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
- "logits/chosen": -1.9413652420043945,
99
- "logits/rejected": -1.9432977437973022,
100
- "logps/chosen": -32.551727294921875,
101
- "logps/rejected": -33.218013763427734,
102
- "loss": 0.9525,
103
- "rewards/accuracies": 0.5375000238418579,
104
- "rewards/chosen": 0.02907358668744564,
105
- "rewards/margins": 0.05705299973487854,
106
- "rewards/rejected": -0.02797941491007805,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
- "logits/chosen": -2.0722384452819824,
113
- "logits/rejected": -2.0772085189819336,
114
- "logps/chosen": -33.995296478271484,
115
- "logps/rejected": -36.63286590576172,
116
- "loss": 0.9657,
117
- "rewards/accuracies": 0.550000011920929,
118
- "rewards/chosen": -0.004085049033164978,
119
- "rewards/margins": 0.034312374889850616,
120
- "rewards/rejected": -0.038397423923015594,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
- "logits/chosen": -1.9325292110443115,
127
- "logits/rejected": -1.93563973903656,
128
- "logps/chosen": -34.35693359375,
129
- "logps/rejected": -34.67926788330078,
130
- "loss": 0.9069,
131
- "rewards/accuracies": 0.574999988079071,
132
- "rewards/chosen": 0.042526502162218094,
133
- "rewards/margins": 0.09463920444250107,
134
- "rewards/rejected": -0.05211270600557327,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
- "logits/chosen": -1.9406629800796509,
141
- "logits/rejected": -1.9451707601547241,
142
- "logps/chosen": -32.38554000854492,
143
- "logps/rejected": -32.36783218383789,
144
- "loss": 0.9455,
145
- "rewards/accuracies": 0.612500011920929,
146
- "rewards/chosen": 0.04912734776735306,
147
- "rewards/margins": 0.05448007583618164,
148
- "rewards/rejected": -0.005352728068828583,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
- "logits/chosen": -2.037841320037842,
155
- "logits/rejected": -2.0358526706695557,
156
- "logps/chosen": -32.14287185668945,
157
- "logps/rejected": -31.300832748413086,
158
- "loss": 0.9204,
159
- "rewards/accuracies": 0.5874999761581421,
160
- "rewards/chosen": 0.058778904378414154,
161
- "rewards/margins": 0.07955379039049149,
162
- "rewards/rejected": -0.02077488601207733,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
- "eval_logits/chosen": -2.2316644191741943,
168
- "eval_logits/rejected": -2.2268166542053223,
169
- "eval_logps/chosen": -34.03582000732422,
170
- "eval_logps/rejected": -37.548709869384766,
171
- "eval_loss": 0.9816102385520935,
172
- "eval_rewards/accuracies": 0.5394518375396729,
173
- "eval_rewards/chosen": -0.0007606110884808004,
174
- "eval_rewards/margins": 0.018493397161364555,
175
- "eval_rewards/rejected": -0.019254004582762718,
176
- "eval_runtime": 145.9929,
177
- "eval_samples_per_second": 2.349,
178
  "eval_steps_per_second": 0.295,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
- "logits/chosen": -1.9917171001434326,
185
- "logits/rejected": -1.9893367290496826,
186
- "logps/chosen": -33.120052337646484,
187
- "logps/rejected": -34.02777099609375,
188
- "loss": 0.9329,
189
- "rewards/accuracies": 0.675000011920929,
190
- "rewards/chosen": 0.0742575079202652,
191
- "rewards/margins": 0.0732978954911232,
192
- "rewards/rejected": 0.0009596304735168815,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
- "logits/chosen": -2.0032849311828613,
199
- "logits/rejected": -1.9949623346328735,
200
- "logps/chosen": -32.30955123901367,
201
- "logps/rejected": -32.144927978515625,
202
- "loss": 0.9308,
203
- "rewards/accuracies": 0.612500011920929,
204
- "rewards/chosen": 0.0812125876545906,
205
- "rewards/margins": 0.07126720249652863,
206
- "rewards/rejected": 0.009945395402610302,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
- "logits/chosen": -2.031322956085205,
213
- "logits/rejected": -2.023355007171631,
214
- "logps/chosen": -30.32772445678711,
215
- "logps/rejected": -32.05691909790039,
216
- "loss": 0.9282,
217
- "rewards/accuracies": 0.550000011920929,
218
- "rewards/chosen": 0.08879294246435165,
219
- "rewards/margins": 0.09113965928554535,
220
- "rewards/rejected": -0.002346712863072753,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
- "logits/chosen": -1.9619598388671875,
227
- "logits/rejected": -1.9721866846084595,
228
- "logps/chosen": -31.2375431060791,
229
- "logps/rejected": -32.572418212890625,
230
- "loss": 0.8702,
231
- "rewards/accuracies": 0.625,
232
- "rewards/chosen": 0.1092713475227356,
233
- "rewards/margins": 0.13179358839988708,
234
- "rewards/rejected": -0.02252225950360298,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
- "logits/chosen": -1.8724457025527954,
241
- "logits/rejected": -1.8736213445663452,
242
- "logps/chosen": -33.905731201171875,
243
- "logps/rejected": -34.80728530883789,
244
- "loss": 0.818,
245
- "rewards/accuracies": 0.637499988079071,
246
- "rewards/chosen": 0.17217601835727692,
247
- "rewards/margins": 0.20219452679157257,
248
- "rewards/rejected": -0.03001854196190834,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
- "logits/chosen": -1.923604965209961,
255
- "logits/rejected": -1.9202001094818115,
256
- "logps/chosen": -36.00878143310547,
257
- "logps/rejected": -32.71224594116211,
258
- "loss": 0.9086,
259
- "rewards/accuracies": 0.637499988079071,
260
- "rewards/chosen": 0.09773740917444229,
261
- "rewards/margins": 0.09143415838479996,
262
- "rewards/rejected": 0.0063032531179487705,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
- "logits/chosen": -2.024691343307495,
269
- "logits/rejected": -2.0173799991607666,
270
- "logps/chosen": -33.495445251464844,
271
- "logps/rejected": -31.415653228759766,
272
- "loss": 0.7882,
273
- "rewards/accuracies": 0.737500011920929,
274
- "rewards/chosen": 0.18144862353801727,
275
- "rewards/margins": 0.21973037719726562,
276
- "rewards/rejected": -0.03828175365924835,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
- "logits/chosen": -2.031024694442749,
283
- "logits/rejected": -2.036294937133789,
284
- "logps/chosen": -32.22355270385742,
285
- "logps/rejected": -32.47669219970703,
286
- "loss": 0.8225,
287
- "rewards/accuracies": 0.6875,
288
- "rewards/chosen": 0.19274193048477173,
289
- "rewards/margins": 0.1876741349697113,
290
- "rewards/rejected": 0.00506778946146369,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
- "logits/chosen": -2.0325100421905518,
297
- "logits/rejected": -2.029745578765869,
298
- "logps/chosen": -31.281234741210938,
299
- "logps/rejected": -31.360708236694336,
300
- "loss": 0.8564,
301
- "rewards/accuracies": 0.625,
302
- "rewards/chosen": 0.12463297694921494,
303
- "rewards/margins": 0.15012845396995544,
304
- "rewards/rejected": -0.025495493784546852,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
- "logits/chosen": -1.902600646018982,
311
- "logits/rejected": -1.9072542190551758,
312
- "logps/chosen": -31.33133316040039,
313
- "logps/rejected": -32.85138702392578,
314
- "loss": 0.8045,
315
  "rewards/accuracies": 0.7124999761581421,
316
- "rewards/chosen": 0.16497036814689636,
317
- "rewards/margins": 0.2012440711259842,
318
- "rewards/rejected": -0.03627369925379753,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
- "eval_logits/chosen": -2.2286624908447266,
324
- "eval_logits/rejected": -2.2238309383392334,
325
- "eval_logps/chosen": -34.062259674072266,
326
- "eval_logps/rejected": -37.57956314086914,
327
- "eval_loss": 0.9791061282157898,
328
- "eval_rewards/accuracies": 0.5361295938491821,
329
- "eval_rewards/chosen": -0.016622914001345634,
330
- "eval_rewards/margins": 0.021141981706023216,
331
- "eval_rewards/rejected": -0.03776489570736885,
332
- "eval_runtime": 145.8001,
333
- "eval_samples_per_second": 2.353,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
- "logits/chosen": -2.0155138969421387,
341
- "logits/rejected": -2.0261716842651367,
342
- "logps/chosen": -31.7918758392334,
343
- "logps/rejected": -33.9483528137207,
344
- "loss": 0.8508,
345
- "rewards/accuracies": 0.6499999761581421,
346
- "rewards/chosen": 0.10765652358531952,
347
- "rewards/margins": 0.15904827415943146,
348
- "rewards/rejected": -0.051391761749982834,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
- "logits/chosen": -1.908071756362915,
355
- "logits/rejected": -1.9228509664535522,
356
- "logps/chosen": -29.81874656677246,
357
- "logps/rejected": -31.65224266052246,
358
- "loss": 0.7758,
359
- "rewards/accuracies": 0.7124999761581421,
360
- "rewards/chosen": 0.175676628947258,
361
- "rewards/margins": 0.23211655020713806,
362
- "rewards/rejected": -0.05643991753458977,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
- "logits/chosen": -1.9642655849456787,
369
- "logits/rejected": -1.9682365655899048,
370
- "logps/chosen": -33.13427734375,
371
- "logps/rejected": -31.658504486083984,
372
- "loss": 0.7961,
373
- "rewards/accuracies": 0.699999988079071,
374
- "rewards/chosen": 0.16991479694843292,
375
- "rewards/margins": 0.23543640971183777,
376
- "rewards/rejected": -0.06552163511514664,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
- "logits/chosen": -1.9623470306396484,
383
- "logits/rejected": -1.9405139684677124,
384
- "logps/chosen": -33.870643615722656,
385
- "logps/rejected": -35.13296127319336,
386
- "loss": 0.7551,
387
  "rewards/accuracies": 0.7250000238418579,
388
- "rewards/chosen": 0.16986075043678284,
389
- "rewards/margins": 0.279163658618927,
390
- "rewards/rejected": -0.10930290073156357,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
- "logits/chosen": -2.003516674041748,
397
- "logits/rejected": -2.0002071857452393,
398
- "logps/chosen": -32.743778228759766,
399
- "logps/rejected": -36.293540954589844,
400
- "loss": 0.867,
401
  "rewards/accuracies": 0.6499999761581421,
402
- "rewards/chosen": 0.10877454280853271,
403
- "rewards/margins": 0.14994443953037262,
404
- "rewards/rejected": -0.041169874370098114,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
- "logits/chosen": -1.8702592849731445,
411
- "logits/rejected": -1.867851972579956,
412
- "logps/chosen": -33.99380111694336,
413
- "logps/rejected": -35.564971923828125,
414
- "loss": 0.85,
415
- "rewards/accuracies": 0.7124999761581421,
416
- "rewards/chosen": 0.11675956100225449,
417
- "rewards/margins": 0.15297001600265503,
418
- "rewards/rejected": -0.03621045500040054,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
- "logits/chosen": -1.8547359704971313,
425
- "logits/rejected": -1.8523473739624023,
426
- "logps/chosen": -34.247703552246094,
427
- "logps/rejected": -31.8588809967041,
428
- "loss": 0.8606,
429
- "rewards/accuracies": 0.574999988079071,
430
- "rewards/chosen": 0.0899367406964302,
431
- "rewards/margins": 0.14246629178524017,
432
- "rewards/rejected": -0.05252955108880997,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
- "logits/chosen": -1.9580624103546143,
439
- "logits/rejected": -1.9475984573364258,
440
- "logps/chosen": -35.032501220703125,
441
- "logps/rejected": -31.907541275024414,
442
- "loss": 0.7731,
443
- "rewards/accuracies": 0.7250000238418579,
444
- "rewards/chosen": 0.19448420405387878,
445
- "rewards/margins": 0.23441271483898163,
446
- "rewards/rejected": -0.039928533136844635,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
- "logits/chosen": -2.0538430213928223,
453
- "logits/rejected": -2.038924217224121,
454
- "logps/chosen": -30.76409912109375,
455
- "logps/rejected": -32.64435958862305,
456
- "loss": 0.9184,
457
- "rewards/accuracies": 0.612500011920929,
458
- "rewards/chosen": 0.09601466357707977,
459
- "rewards/margins": 0.09152902662754059,
460
- "rewards/rejected": 0.004485635552555323,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
- "logits/chosen": -1.9241282939910889,
467
- "logits/rejected": -1.9215872287750244,
468
- "logps/chosen": -32.469078063964844,
469
- "logps/rejected": -30.92861557006836,
470
- "loss": 0.7186,
471
- "rewards/accuracies": 0.762499988079071,
472
- "rewards/chosen": 0.2748206555843353,
473
- "rewards/margins": 0.34190455079078674,
474
- "rewards/rejected": -0.06708388030529022,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
- "eval_logits/chosen": -2.224278211593628,
480
- "eval_logits/rejected": -2.219449996948242,
481
- "eval_logps/chosen": -34.111026763916016,
482
- "eval_logps/rejected": -37.63855743408203,
483
- "eval_loss": 0.9731553196907043,
484
- "eval_rewards/accuracies": 0.5070598125457764,
485
- "eval_rewards/chosen": -0.045885536819696426,
486
- "eval_rewards/margins": 0.02727569453418255,
487
- "eval_rewards/rejected": -0.07316123694181442,
488
- "eval_runtime": 145.8155,
489
- "eval_samples_per_second": 2.352,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
- "logits/chosen": -1.9088821411132812,
497
- "logits/rejected": -1.9056317806243896,
498
- "logps/chosen": -31.343881607055664,
499
- "logps/rejected": -33.82904052734375,
500
- "loss": 0.8131,
501
- "rewards/accuracies": 0.7124999761581421,
502
- "rewards/chosen": 0.15568041801452637,
503
- "rewards/margins": 0.20781342685222626,
504
- "rewards/rejected": -0.05213301628828049,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
- "logits/chosen": -1.9583854675292969,
511
- "logits/rejected": -1.9461708068847656,
512
- "logps/chosen": -34.326744079589844,
513
- "logps/rejected": -33.68773651123047,
514
- "loss": 0.763,
515
- "rewards/accuracies": 0.6499999761581421,
516
- "rewards/chosen": 0.1615992784500122,
517
- "rewards/margins": 0.250935435295105,
518
- "rewards/rejected": -0.08933614194393158,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
- "logits/chosen": -1.9934914112091064,
525
- "logits/rejected": -1.9920612573623657,
526
- "logps/chosen": -33.20575714111328,
527
- "logps/rejected": -32.5851936340332,
528
- "loss": 0.7771,
529
- "rewards/accuracies": 0.75,
530
- "rewards/chosen": 0.16801992058753967,
531
- "rewards/margins": 0.23138853907585144,
532
- "rewards/rejected": -0.06336863338947296,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
- "logits/chosen": -2.080026149749756,
539
- "logits/rejected": -2.064359188079834,
540
- "logps/chosen": -33.830657958984375,
541
- "logps/rejected": -33.15457534790039,
542
- "loss": 0.7811,
543
- "rewards/accuracies": 0.675000011920929,
544
- "rewards/chosen": 0.20923766493797302,
545
- "rewards/margins": 0.23116116225719452,
546
- "rewards/rejected": -0.021923482418060303,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
- "logits/chosen": -1.952505111694336,
553
- "logits/rejected": -1.95168936252594,
554
- "logps/chosen": -32.850311279296875,
555
- "logps/rejected": -32.56792449951172,
556
- "loss": 0.7213,
557
- "rewards/accuracies": 0.6875,
558
- "rewards/chosen": 0.25907084345817566,
559
- "rewards/margins": 0.3207371234893799,
560
- "rewards/rejected": -0.061666231602430344,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
- "logits/chosen": -1.9081252813339233,
567
- "logits/rejected": -1.918378472328186,
568
- "logps/chosen": -31.902362823486328,
569
- "logps/rejected": -35.33353805541992,
570
- "loss": 0.7965,
571
- "rewards/accuracies": 0.675000011920929,
572
- "rewards/chosen": 0.1813959777355194,
573
- "rewards/margins": 0.20826482772827148,
574
- "rewards/rejected": -0.026868879795074463,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
- "logits/chosen": -2.0479800701141357,
581
- "logits/rejected": -2.0415284633636475,
582
- "logps/chosen": -33.33736038208008,
583
- "logps/rejected": -29.26900863647461,
584
- "loss": 0.8,
585
- "rewards/accuracies": 0.675000011920929,
586
- "rewards/chosen": 0.18876628577709198,
587
- "rewards/margins": 0.21431729197502136,
588
- "rewards/rejected": -0.025551024824380875,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
- "logits/chosen": -1.9078292846679688,
595
- "logits/rejected": -1.9100587368011475,
596
- "logps/chosen": -33.90921401977539,
597
- "logps/rejected": -30.994491577148438,
598
- "loss": 0.7663,
599
- "rewards/accuracies": 0.699999988079071,
600
- "rewards/chosen": 0.19790422916412354,
601
- "rewards/margins": 0.271023154258728,
602
- "rewards/rejected": -0.07311896234750748,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
- "train_loss": 0.859264765157328,
610
- "train_runtime": 3251.7628,
611
  "train_samples_per_second": 0.947,
612
  "train_steps_per_second": 0.118
613
  }
 
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
+ "loss": 0.6931,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
 
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
+ "logits/chosen": -1.8668904304504395,
29
+ "logits/rejected": -1.8712035417556763,
30
+ "logps/chosen": -36.981239318847656,
31
+ "logps/rejected": -33.63866424560547,
32
+ "loss": 0.6837,
33
+ "rewards/accuracies": 0.5138888955116272,
34
+ "rewards/chosen": 0.017751876264810562,
35
+ "rewards/margins": 0.023763436824083328,
36
+ "rewards/rejected": -0.0060115596279501915,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
+ "logits/chosen": -1.9975477457046509,
43
+ "logits/rejected": -2.0001885890960693,
44
+ "logps/chosen": -29.638402938842773,
45
+ "logps/rejected": -29.045080184936523,
46
+ "loss": 0.7005,
47
+ "rewards/accuracies": 0.4375,
48
+ "rewards/chosen": 0.002654150128364563,
49
+ "rewards/margins": -0.010201702825725079,
50
+ "rewards/rejected": 0.012855852022767067,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
+ "logits/chosen": -1.9209153652191162,
57
+ "logits/rejected": -1.9182332754135132,
58
+ "logps/chosen": -31.392269134521484,
59
+ "logps/rejected": -33.214996337890625,
60
+ "loss": 0.6893,
61
+ "rewards/accuracies": 0.550000011920929,
62
+ "rewards/chosen": 0.016679534688591957,
63
+ "rewards/margins": 0.014253495261073112,
64
+ "rewards/rejected": 0.0024260382633656263,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
+ "logits/chosen": -2.017472505569458,
71
+ "logits/rejected": -2.0087223052978516,
72
+ "logps/chosen": -32.56648635864258,
73
+ "logps/rejected": -32.53681564331055,
74
+ "loss": 0.6837,
75
+ "rewards/accuracies": 0.5625,
76
+ "rewards/chosen": 0.007227012421935797,
77
+ "rewards/margins": 0.02412785217165947,
78
+ "rewards/rejected": -0.016900835558772087,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
+ "logits/chosen": -1.8619842529296875,
85
+ "logits/rejected": -1.8512147665023804,
86
+ "logps/chosen": -33.57755661010742,
87
+ "logps/rejected": -35.45317840576172,
88
+ "loss": 0.7035,
89
+ "rewards/accuracies": 0.4749999940395355,
90
+ "rewards/chosen": -0.014055396430194378,
91
+ "rewards/margins": -0.013470378704369068,
92
+ "rewards/rejected": -0.0005850124871358275,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
+ "logits/chosen": -1.940241813659668,
99
+ "logits/rejected": -1.942185401916504,
100
+ "logps/chosen": -32.55712127685547,
101
+ "logps/rejected": -33.23926544189453,
102
+ "loss": 0.6629,
103
+ "rewards/accuracies": 0.6625000238418579,
104
+ "rewards/chosen": 0.030143504962325096,
105
+ "rewards/margins": 0.07766537368297577,
106
+ "rewards/rejected": -0.047521863132715225,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
+ "logits/chosen": -2.070986270904541,
113
+ "logits/rejected": -2.0759458541870117,
114
+ "logps/chosen": -33.997291564941406,
115
+ "logps/rejected": -36.65125274658203,
116
+ "loss": 0.6775,
117
+ "rewards/accuracies": 0.5874999761581421,
118
+ "rewards/chosen": -0.006164415739476681,
119
+ "rewards/margins": 0.051500022411346436,
120
+ "rewards/rejected": -0.057664431631565094,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
+ "logits/chosen": -1.9328521490097046,
127
+ "logits/rejected": -1.935974359512329,
128
+ "logps/chosen": -34.317806243896484,
129
+ "logps/rejected": -34.648555755615234,
130
+ "loss": 0.6465,
131
+ "rewards/accuracies": 0.6625000238418579,
132
+ "rewards/chosen": 0.07700104266405106,
133
+ "rewards/margins": 0.11630574613809586,
134
+ "rewards/rejected": -0.0393047034740448,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
+ "logits/chosen": -1.941239356994629,
141
+ "logits/rejected": -1.9457323551177979,
142
+ "logps/chosen": -32.385948181152344,
143
+ "logps/rejected": -32.34455108642578,
144
+ "loss": 0.6805,
145
+ "rewards/accuracies": 0.550000011920929,
146
+ "rewards/chosen": 0.057031381875276566,
147
+ "rewards/margins": 0.046979233622550964,
148
+ "rewards/rejected": 0.010052147321403027,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
+ "logits/chosen": -2.0390255451202393,
155
+ "logits/rejected": -2.0370407104492188,
156
+ "logps/chosen": -32.15161895751953,
157
+ "logps/rejected": -31.304489135742188,
158
+ "loss": 0.6577,
159
+ "rewards/accuracies": 0.6625000238418579,
160
+ "rewards/chosen": 0.06245182827115059,
161
+ "rewards/margins": 0.08924683928489685,
162
+ "rewards/rejected": -0.02679501473903656,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
+ "eval_logits/chosen": -2.233787775039673,
168
+ "eval_logits/rejected": -2.228933572769165,
169
+ "eval_logps/chosen": -34.04001235961914,
170
+ "eval_logps/rejected": -37.535667419433594,
171
+ "eval_loss": 0.6948937773704529,
172
+ "eval_rewards/accuracies": 0.5315614938735962,
173
+ "eval_rewards/chosen": -0.0038188453763723373,
174
+ "eval_rewards/margins": 0.00951432902365923,
175
+ "eval_rewards/rejected": -0.013333176262676716,
176
+ "eval_runtime": 145.7707,
177
+ "eval_samples_per_second": 2.353,
178
  "eval_steps_per_second": 0.295,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
+ "logits/chosen": -1.994390845298767,
185
+ "logits/rejected": -1.991999864578247,
186
+ "logps/chosen": -33.1099739074707,
187
+ "logps/rejected": -34.02802276611328,
188
+ "loss": 0.6747,
189
+ "rewards/accuracies": 0.6499999761581421,
190
+ "rewards/chosen": 0.09369214624166489,
191
+ "rewards/margins": 0.0927465409040451,
192
+ "rewards/rejected": 0.0009456165134906769,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
+ "logits/chosen": -2.0057625770568848,
199
+ "logits/rejected": -1.9974448680877686,
200
+ "logps/chosen": -32.31781768798828,
201
+ "logps/rejected": -32.132328033447266,
202
+ "loss": 0.6694,
203
+ "rewards/accuracies": 0.5625,
204
+ "rewards/chosen": 0.0889604315161705,
205
+ "rewards/margins": 0.06853620707988739,
206
+ "rewards/rejected": 0.02042422816157341,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
+ "logits/chosen": -2.0336880683898926,
213
+ "logits/rejected": -2.0257279872894287,
214
+ "logps/chosen": -30.32443618774414,
215
+ "logps/rejected": -32.07221221923828,
216
+ "loss": 0.6522,
217
+ "rewards/accuracies": 0.6499999761581421,
218
+ "rewards/chosen": 0.10589826107025146,
219
+ "rewards/margins": 0.11934350430965424,
220
+ "rewards/rejected": -0.013445250689983368,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
+ "logits/chosen": -1.9642670154571533,
227
+ "logits/rejected": -1.9745124578475952,
228
+ "logps/chosen": -31.212024688720703,
229
+ "logps/rejected": -32.565834045410156,
230
+ "loss": 0.6272,
231
+ "rewards/accuracies": 0.6875,
232
+ "rewards/chosen": 0.14534947276115417,
233
+ "rewards/margins": 0.16701875627040863,
234
+ "rewards/rejected": -0.02166926860809326,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
+ "logits/chosen": -1.8759937286376953,
241
+ "logits/rejected": -1.8771553039550781,
242
+ "logps/chosen": -33.917335510253906,
243
+ "logps/rejected": -34.8185920715332,
244
+ "loss": 0.6064,
245
+ "rewards/accuracies": 0.612500011920929,
246
+ "rewards/chosen": 0.19274887442588806,
247
+ "rewards/margins": 0.235683411359787,
248
+ "rewards/rejected": -0.042934536933898926,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
+ "logits/chosen": -1.927369475364685,
255
+ "logits/rejected": -1.9239553213119507,
256
+ "logps/chosen": -35.98323440551758,
257
+ "logps/rejected": -32.705108642578125,
258
+ "loss": 0.6464,
259
+ "rewards/accuracies": 0.5874999761581421,
260
+ "rewards/chosen": 0.13190819323062897,
261
+ "rewards/margins": 0.11955627053976059,
262
+ "rewards/rejected": 0.012351910583674908,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
+ "logits/chosen": -2.028454542160034,
269
+ "logits/rejected": -2.0210976600646973,
270
+ "logps/chosen": -33.50457763671875,
271
+ "logps/rejected": -31.386072158813477,
272
+ "loss": 0.6077,
273
+ "rewards/accuracies": 0.637499988079071,
274
+ "rewards/chosen": 0.20529839396476746,
275
+ "rewards/margins": 0.22925393283367157,
276
+ "rewards/rejected": -0.02395555004477501,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
+ "logits/chosen": -2.0345230102539062,
283
+ "logits/rejected": -2.0397608280181885,
284
+ "logps/chosen": -32.19758224487305,
285
+ "logps/rejected": -32.435523986816406,
286
+ "loss": 0.6083,
287
+ "rewards/accuracies": 0.7124999761581421,
288
+ "rewards/chosen": 0.24304144084453583,
289
+ "rewards/margins": 0.2083090990781784,
290
+ "rewards/rejected": 0.03473237156867981,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
+ "logits/chosen": -2.035979747772217,
297
+ "logits/rejected": -2.0332181453704834,
298
+ "logps/chosen": -31.255151748657227,
299
+ "logps/rejected": -31.321630477905273,
300
+ "loss": 0.6326,
301
+ "rewards/accuracies": 0.675000011920929,
302
+ "rewards/chosen": 0.163661926984787,
303
+ "rewards/margins": 0.16605310142040253,
304
+ "rewards/rejected": -0.0023911758325994015,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
+ "logits/chosen": -1.9058892726898193,
311
+ "logits/rejected": -1.910540223121643,
312
+ "logps/chosen": -31.33197021484375,
313
+ "logps/rejected": -32.817604064941406,
314
+ "loss": 0.6156,
315
  "rewards/accuracies": 0.7124999761581421,
316
+ "rewards/chosen": 0.19201864302158356,
317
+ "rewards/margins": 0.21068540215492249,
318
+ "rewards/rejected": -0.01866675540804863,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
+ "eval_logits/chosen": -2.2316529750823975,
324
+ "eval_logits/rejected": -2.2268118858337402,
325
+ "eval_logps/chosen": -34.053470611572266,
326
+ "eval_logps/rejected": -37.557804107666016,
327
+ "eval_loss": 0.6943473219871521,
328
+ "eval_rewards/accuracies": 0.5191029906272888,
329
+ "eval_rewards/chosen": -0.013246187008917332,
330
+ "eval_rewards/margins": 0.015584951266646385,
331
+ "eval_rewards/rejected": -0.028831137344241142,
332
+ "eval_runtime": 145.8114,
333
+ "eval_samples_per_second": 2.352,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
+ "logits/chosen": -2.01808762550354,
341
+ "logits/rejected": -2.0287296772003174,
342
+ "logps/chosen": -31.763586044311523,
343
+ "logps/rejected": -33.926063537597656,
344
+ "loss": 0.622,
345
+ "rewards/accuracies": 0.637499988079071,
346
+ "rewards/chosen": 0.14540112018585205,
347
+ "rewards/margins": 0.1897541582584381,
348
+ "rewards/rejected": -0.04435301572084427,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
+ "logits/chosen": -1.910658597946167,
355
+ "logits/rejected": -1.9254140853881836,
356
+ "logps/chosen": -29.832544326782227,
357
+ "logps/rejected": -31.612533569335938,
358
+ "loss": 0.6024,
359
+ "rewards/accuracies": 0.7250000238418579,
360
+ "rewards/chosen": 0.19529443979263306,
361
+ "rewards/margins": 0.23334410786628723,
362
+ "rewards/rejected": -0.038049641996622086,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
+ "logits/chosen": -1.9673402309417725,
369
+ "logits/rejected": -1.9713077545166016,
370
+ "logps/chosen": -33.062660217285156,
371
+ "logps/rejected": -31.608203887939453,
372
+ "loss": 0.5911,
373
+ "rewards/accuracies": 0.6875,
374
+ "rewards/chosen": 0.24836687743663788,
375
+ "rewards/margins": 0.2895973324775696,
376
+ "rewards/rejected": -0.0412304513156414,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
+ "logits/chosen": -1.9657704830169678,
383
+ "logits/rejected": -1.9439115524291992,
384
+ "logps/chosen": -33.82358932495117,
385
+ "logps/rejected": -35.09131622314453,
386
+ "loss": 0.5733,
387
  "rewards/accuracies": 0.7250000238418579,
388
+ "rewards/chosen": 0.23111140727996826,
389
+ "rewards/margins": 0.32947883009910583,
390
+ "rewards/rejected": -0.09836738556623459,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
+ "logits/chosen": -2.007312536239624,
397
+ "logits/rejected": -2.003988742828369,
398
+ "logps/chosen": -32.68491744995117,
399
+ "logps/rejected": -36.24928283691406,
400
+ "loss": 0.6228,
401
  "rewards/accuracies": 0.6499999761581421,
402
+ "rewards/chosen": 0.1681055724620819,
403
+ "rewards/margins": 0.18515849113464355,
404
+ "rewards/rejected": -0.01705293543636799,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
+ "logits/chosen": -1.8734614849090576,
411
+ "logits/rejected": -1.8710591793060303,
412
+ "logps/chosen": -33.96207809448242,
413
+ "logps/rejected": -35.5266227722168,
414
+ "loss": 0.6286,
415
+ "rewards/accuracies": 0.6499999761581421,
416
+ "rewards/chosen": 0.15842413902282715,
417
+ "rewards/margins": 0.17382602393627167,
418
+ "rewards/rejected": -0.015401872806251049,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
+ "logits/chosen": -1.8584483861923218,
425
+ "logits/rejected": -1.8560174703598022,
426
+ "logps/chosen": -34.1807975769043,
427
+ "logps/rejected": -31.80449867248535,
428
+ "loss": 0.6325,
429
+ "rewards/accuracies": 0.699999988079071,
430
+ "rewards/chosen": 0.15176042914390564,
431
+ "rewards/margins": 0.17497751116752625,
432
+ "rewards/rejected": -0.02321707457304001,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
+ "logits/chosen": -1.9630708694458008,
439
+ "logits/rejected": -1.9525701999664307,
440
+ "logps/chosen": -35.00975036621094,
441
+ "logps/rejected": -31.84867286682129,
442
+ "loss": 0.5978,
443
+ "rewards/accuracies": 0.737500011920929,
444
+ "rewards/chosen": 0.2428235560655594,
445
+ "rewards/margins": 0.24820086359977722,
446
+ "rewards/rejected": -0.005377279128879309,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
+ "logits/chosen": -2.0586793422698975,
453
+ "logits/rejected": -2.043766498565674,
454
+ "logps/chosen": -30.7253360748291,
455
+ "logps/rejected": -32.66551971435547,
456
+ "loss": 0.6462,
457
+ "rewards/accuracies": 0.637499988079071,
458
+ "rewards/chosen": 0.13915367424488068,
459
+ "rewards/margins": 0.14873233437538147,
460
+ "rewards/rejected": -0.009578653611242771,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
+ "logits/chosen": -1.9287078380584717,
467
+ "logits/rejected": -1.92616868019104,
468
+ "logps/chosen": -32.39979553222656,
469
+ "logps/rejected": -30.87893295288086,
470
+ "loss": 0.5468,
471
+ "rewards/accuracies": 0.75,
472
+ "rewards/chosen": 0.36911940574645996,
473
+ "rewards/margins": 0.4126061797142029,
474
+ "rewards/rejected": -0.04348675161600113,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
+ "eval_logits/chosen": -2.2277145385742188,
480
+ "eval_logits/rejected": -2.222882032394409,
481
+ "eval_logps/chosen": -34.06844711303711,
482
+ "eval_logps/rejected": -37.586029052734375,
483
+ "eval_loss": 0.6902604699134827,
484
+ "eval_rewards/accuracies": 0.5191029906272888,
485
+ "eval_rewards/chosen": -0.023726314306259155,
486
+ "eval_rewards/margins": 0.024859989061951637,
487
+ "eval_rewards/rejected": -0.04858630895614624,
488
+ "eval_runtime": 145.7514,
489
+ "eval_samples_per_second": 2.353,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
+ "logits/chosen": -1.9126994609832764,
497
+ "logits/rejected": -1.909444808959961,
498
+ "logps/chosen": -31.307331085205078,
499
+ "logps/rejected": -33.79678726196289,
500
+ "loss": 0.6022,
501
+ "rewards/accuracies": 0.762499988079071,
502
+ "rewards/chosen": 0.20721367001533508,
503
+ "rewards/margins": 0.24546091258525848,
504
+ "rewards/rejected": -0.038247235119342804,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
+ "logits/chosen": -1.9636253118515015,
511
+ "logits/rejected": -1.9514182806015015,
512
+ "logps/chosen": -34.32239532470703,
513
+ "logps/rejected": -33.65345764160156,
514
+ "loss": 0.5933,
515
+ "rewards/accuracies": 0.737500011920929,
516
+ "rewards/chosen": 0.19157564640045166,
517
+ "rewards/margins": 0.27180662751197815,
518
+ "rewards/rejected": -0.08023098856210709,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
+ "logits/chosen": -1.9983371496200562,
525
+ "logits/rejected": -1.9969165325164795,
526
+ "logps/chosen": -33.176841735839844,
527
+ "logps/rejected": -32.538509368896484,
528
+ "loss": 0.5981,
529
+ "rewards/accuracies": 0.737500011920929,
530
+ "rewards/chosen": 0.21626754105091095,
531
+ "rewards/margins": 0.257517009973526,
532
+ "rewards/rejected": -0.04124947637319565,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
+ "logits/chosen": -2.085096836090088,
539
+ "logits/rejected": -2.069387435913086,
540
+ "logps/chosen": -33.758583068847656,
541
+ "logps/rejected": -33.068748474121094,
542
+ "loss": 0.5952,
543
+ "rewards/accuracies": 0.6625000238418579,
544
+ "rewards/chosen": 0.29456597566604614,
545
+ "rewards/margins": 0.26006320118904114,
546
+ "rewards/rejected": 0.034502796828746796,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
+ "logits/chosen": -1.9578405618667603,
553
+ "logits/rejected": -1.9569900035858154,
554
+ "logps/chosen": -32.835628509521484,
555
+ "logps/rejected": -32.520538330078125,
556
+ "loss": 0.5707,
557
+ "rewards/accuracies": 0.75,
558
+ "rewards/chosen": 0.3125234544277191,
559
+ "rewards/margins": 0.3512992262840271,
560
+ "rewards/rejected": -0.038775794208049774,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
+ "logits/chosen": -1.9134842157363892,
567
+ "logits/rejected": -1.9237855672836304,
568
+ "logps/chosen": -31.848628997802734,
569
+ "logps/rejected": -35.309120178222656,
570
+ "loss": 0.5923,
571
+ "rewards/accuracies": 0.6875,
572
+ "rewards/chosen": 0.24924305081367493,
573
+ "rewards/margins": 0.2634957432746887,
574
+ "rewards/rejected": -0.014252680353820324,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
+ "logits/chosen": -2.052818775177002,
581
+ "logits/rejected": -2.046323776245117,
582
+ "logps/chosen": -33.34727096557617,
583
+ "logps/rejected": -29.256671905517578,
584
+ "loss": 0.5993,
585
+ "rewards/accuracies": 0.737500011920929,
586
+ "rewards/chosen": 0.21328690648078918,
587
+ "rewards/margins": 0.23445896804332733,
588
+ "rewards/rejected": -0.021172069013118744,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
+ "logits/chosen": -1.9116294384002686,
595
+ "logits/rejected": -1.913830041885376,
596
+ "logps/chosen": -33.83928680419922,
597
+ "logps/rejected": -30.931400299072266,
598
+ "loss": 0.5714,
599
+ "rewards/accuracies": 0.75,
600
+ "rewards/chosen": 0.27983543276786804,
601
+ "rewards/margins": 0.3209769129753113,
602
+ "rewards/rejected": -0.04114149510860443,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
+ "train_loss": 0.6291831790626823,
610
+ "train_runtime": 3251.1508,
611
  "train_samples_per_second": 0.947,
612
  "train_steps_per_second": 0.118
613
  }
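For context on the loss scale in the updated logs: the card tags `trl` and `dpo`, and the first logged entry above has every reward at 0.0 with a loss of 0.6931. A sketch, assuming the standard sigmoid DPO objective (the TRL default), shows why that starting value is expected; the per-batch reward definitions below are the usual DPO ones, not something stated in this commit:

```latex
% Sketch assuming the sigmoid DPO objective, with
% rewards/chosen = \beta\,(\log\pi_\theta(y_w\mid x) - \log\pi_{\mathrm{ref}}(y_w\mid x))
% and rewards/rejected defined analogously for y_l.
\begin{align*}
\mathcal{L}_{\mathrm{DPO}}
  &= -\log \sigma\big(\text{rewards/chosen} - \text{rewards/rejected}\big)
   = -\log \sigma(\text{rewards/margins}),
\intertext{so at the first logged step, where every reward is $0.0$,}
\mathcal{L}_{\mathrm{DPO}} &= -\log \sigma(0) = \log 2 \approx 0.6931 .
\end{align*}
```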