hugodk-sch committed on
Commit 365af06
1 Parent(s): 7fcbb51

Model save

Files changed (5):
  1. README.md +13 -16
  2. adapter_model.safetensors +1 -1
  3. all_results.json +3 -16
  4. train_results.json +3 -3
  5. trainer_state.json +374 -374
README.md CHANGED
@@ -1,13 +1,10 @@
  ---
  library_name: peft
  tags:
- - alignment-handbook
  - trl
  - dpo
  - generated_from_trainer
  base_model: NbAiLab/nb-gpt-j-6B-v2
- datasets:
- - hugodk-sch/aftonposten_title_prefs
  model-index:
  - name: aftonposten-6b-align-scan
    results: []
@@ -18,17 +15,17 @@ should probably proofread and complete it, then remove this comment. -->
 
  # aftonposten-6b-align-scan
 
- This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
+ This model is a fine-tuned version of [NbAiLab/nb-gpt-j-6B-v2](https://huggingface.co/NbAiLab/nb-gpt-j-6B-v2) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.6907
- - Rewards/chosen: -0.0131
- - Rewards/rejected: -0.0201
- - Rewards/accuracies: 0.5133
- - Rewards/margins: 0.0070
- - Logps/rejected: -37.6172
- - Logps/chosen: -34.1001
- - Logits/rejected: -2.2213
- - Logits/chosen: -2.2261
+ - Loss: 0.4980
+ - Rewards/chosen: 0.0533
+ - Rewards/rejected: 0.0449
+ - Rewards/accuracies: 0.5158
+ - Rewards/margins: 0.0084
+ - Logps/rejected: -37.2920
+ - Logps/chosen: -33.7681
+ - Logits/rejected: -2.2264
+ - Logits/chosen: -2.2312
 
  ## Model description
 
@@ -63,9 +60,9 @@ The following hyperparameters were used during training:
 
  | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
  |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
- | 0.6813 | 0.26 | 100 | 0.6915 | 0.0010 | -0.0036 | 0.5220 | 0.0046 | -37.5345 | -34.0295 | -2.2270 | -2.2319 |
- | 0.6632 | 0.52 | 200 | 0.6888 | -0.0034 | -0.0137 | 0.5660 | 0.0103 | -37.5852 | -34.0514 | -2.2240 | -2.2288 |
- | 0.6327 | 0.78 | 300 | 0.6909 | -0.0119 | -0.0186 | 0.4950 | 0.0067 | -37.6097 | -34.0940 | -2.2214 | -2.2262 |
+ | 0.4947 | 0.26 | 100 | 0.4994 | 0.0239 | 0.0208 | 0.5216 | 0.0031 | -37.4126 | -33.9151 | -2.2299 | -2.2348 |
+ | 0.4825 | 0.52 | 200 | 0.4974 | 0.0511 | 0.0397 | 0.5544 | 0.0113 | -37.3179 | -33.7792 | -2.2274 | -2.2322 |
+ | 0.4669 | 0.78 | 300 | 0.4980 | 0.0533 | 0.0449 | 0.5158 | 0.0084 | -37.2920 | -33.7681 | -2.2264 | -2.2312 |
 
 
  ### Framework versions
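Since the card above only lists metrics, a minimal loading sketch may help. It assumes the adapter is published on the Hub as `hugodk-sch/aftonposten-6b-align-scan` (an assumption based on the model name and committer); the base checkpoint `NbAiLab/nb-gpt-j-6B-v2` is the one named in the card.

```python
# Minimal sketch: attach the PEFT adapter from this repo to its base model.
# The adapter id below is an assumption inferred from the model name; adjust if the repo lives elsewhere.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "NbAiLab/nb-gpt-j-6B-v2"                    # base model named in the card
adapter_id = "hugodk-sch/aftonposten-6b-align-scan"   # assumed Hub id for this adapter

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.float16)
model = PeftModel.from_pretrained(base, adapter_id)   # loads adapter_model.safetensors

inputs = tokenizer("Aftonposten: ", return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=30)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```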
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5fe3cf7d65d359cffa8bd5e777721b4103dc423a79361130841e72ce3085a9a9
+ oid sha256:cbe40ba78b8d64b2ad247d9d8b649621cc40ab032cb206a1fa7ea2cb6bd7cb78
  size 176183216
all_results.json CHANGED
@@ -1,21 +1,8 @@
  {
  "epoch": 1.0,
- "eval_logits/chosen": -2.2261412143707275,
- "eval_logits/rejected": -2.2213146686553955,
- "eval_logps/chosen": -34.100101470947266,
- "eval_logps/rejected": -37.61717224121094,
- "eval_loss": 0.6907125115394592,
- "eval_rewards/accuracies": 0.5132890343666077,
- "eval_rewards/chosen": -0.013110256753861904,
- "eval_rewards/margins": 0.007000547368079424,
- "eval_rewards/rejected": -0.020110804587602615,
- "eval_runtime": 145.4751,
- "eval_samples": 343,
- "eval_samples_per_second": 2.358,
- "eval_steps_per_second": 0.296,
- "train_loss": 0.6689951481757226,
- "train_runtime": 3249.3574,
+ "train_loss": 0.4876757522682091,
+ "train_runtime": 3250.1859,
  "train_samples": 3079,
- "train_samples_per_second": 0.948,
+ "train_samples_per_second": 0.947,
  "train_steps_per_second": 0.118
  }
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
  "epoch": 1.0,
- "train_loss": 0.6689951481757226,
- "train_runtime": 3249.3574,
+ "train_loss": 0.4876757522682091,
+ "train_runtime": 3250.1859,
  "train_samples": 3079,
- "train_samples_per_second": 0.948,
+ "train_samples_per_second": 0.947,
  "train_steps_per_second": 0.118
  }
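The reward columns logged in these files and in trainer_state.json (rewards/chosen, rewards/rejected, rewards/margins, rewards/accuracies) follow TRL's DPOTrainer bookkeeping: scaled differences between policy and frozen-reference log-probabilities. The sketch below uses illustrative tensors and an assumed beta, not values recovered from this run.

```python
# Sketch of how TRL's DPOTrainer derives the logged reward columns from sequence log-probs.
# beta and all tensors here are illustrative placeholders, not values from this training run.
import torch

beta = 0.1  # assumed DPO beta; the real value lives in the (unshown) training config

# summed per-sequence log-probs under the policy and the frozen reference model
policy_chosen_logps = torch.tensor([-34.10, -33.77])
policy_rejected_logps = torch.tensor([-37.62, -37.29])
reference_chosen_logps = torch.tensor([-34.05, -33.90])
reference_rejected_logps = torch.tensor([-37.53, -37.41])

chosen_rewards = beta * (policy_chosen_logps - reference_chosen_logps)        # "rewards/chosen"
rejected_rewards = beta * (policy_rejected_logps - reference_rejected_logps)  # "rewards/rejected"
margins = chosen_rewards - rejected_rewards                                   # "rewards/margins"
accuracies = (chosen_rewards > rejected_rewards).float().mean()               # "rewards/accuracies"

print(chosen_rewards, rejected_rewards, margins, accuracies, sep="\n")
```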
trainer_state.json CHANGED
@@ -15,7 +15,7 @@
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
- "loss": 0.6931,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
@@ -25,155 +25,155 @@
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
- "logits/chosen": -1.8665436506271362,
29
- "logits/rejected": -1.8708542585372925,
30
- "logps/chosen": -36.982460021972656,
31
- "logps/rejected": -33.656585693359375,
32
- "loss": 0.6882,
33
- "rewards/accuracies": 0.5138888955116272,
34
- "rewards/chosen": 0.0048276386223733425,
35
- "rewards/margins": 0.010130541399121284,
36
- "rewards/rejected": -0.005302901845425367,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
- "logits/chosen": -1.9975639581680298,
43
- "logits/rejected": -2.000220537185669,
44
- "logps/chosen": -29.6324462890625,
45
- "logps/rejected": -29.057437896728516,
46
- "loss": 0.693,
47
- "rewards/accuracies": 0.512499988079071,
48
- "rewards/chosen": 0.0019495869055390358,
49
- "rewards/margins": 0.0007479515625163913,
50
- "rewards/rejected": 0.0012016354594379663,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
- "logits/chosen": -1.9208921194076538,
57
- "logits/rejected": -1.918195128440857,
58
- "logps/chosen": -31.420969009399414,
59
- "logps/rejected": -33.22509765625,
60
- "loss": 0.6932,
61
- "rewards/accuracies": 0.48750001192092896,
62
- "rewards/chosen": -0.000974868715275079,
63
- "rewards/margins": 0.0003514128620736301,
64
- "rewards/rejected": -0.0013262818101793528,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
- "logits/chosen": -2.018131971359253,
71
- "logits/rejected": -2.0093750953674316,
72
- "logps/chosen": -32.56100082397461,
73
- "logps/rejected": -32.525699615478516,
74
- "loss": 0.6905,
75
- "rewards/accuracies": 0.512499988079071,
76
- "rewards/chosen": 0.0031610552687197924,
77
- "rewards/margins": 0.0057663023471832275,
78
- "rewards/rejected": -0.002605247776955366,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
- "logits/chosen": -1.8627980947494507,
85
- "logits/rejected": -1.852043867111206,
86
- "logps/chosen": -33.57464599609375,
87
- "logps/rejected": -35.459083557128906,
88
- "loss": 0.6945,
89
- "rewards/accuracies": 0.4625000059604645,
90
- "rewards/chosen": -0.003433463629335165,
91
- "rewards/margins": -0.0020852875895798206,
92
- "rewards/rejected": -0.0013481763890013099,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
- "logits/chosen": -1.9412128925323486,
99
- "logits/rejected": -1.9431606531143188,
100
- "logps/chosen": -32.51996612548828,
101
- "logps/rejected": -33.226768493652344,
102
- "loss": 0.6806,
103
- "rewards/accuracies": 0.5625,
104
- "rewards/chosen": 0.01604365184903145,
105
- "rewards/margins": 0.02712194062769413,
106
- "rewards/rejected": -0.011078287847340107,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
- "logits/chosen": -2.072383165359497,
113
- "logits/rejected": -2.0773634910583496,
114
- "logps/chosen": -33.964813232421875,
115
- "logps/rejected": -36.62574005126953,
116
- "loss": 0.6861,
117
- "rewards/accuracies": 0.5625,
118
- "rewards/chosen": 0.004735334776341915,
119
- "rewards/margins": 0.016109289601445198,
120
- "rewards/rejected": -0.011373954825103283,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
- "logits/chosen": -1.9332096576690674,
127
- "logits/rejected": -1.9363422393798828,
128
- "logps/chosen": -34.310150146484375,
129
- "logps/rejected": -34.669891357421875,
130
- "loss": 0.6749,
131
- "rewards/accuracies": 0.625,
132
- "rewards/chosen": 0.023532329127192497,
133
- "rewards/margins": 0.03902902454137802,
134
- "rewards/rejected": -0.015496693551540375,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
- "logits/chosen": -1.9401609897613525,
141
- "logits/rejected": -1.944685935974121,
142
- "logps/chosen": -32.39472961425781,
143
- "logps/rejected": -32.360809326171875,
144
- "loss": 0.6865,
145
- "rewards/accuracies": 0.625,
146
- "rewards/chosen": 0.01453828252851963,
147
- "rewards/margins": 0.014918209984898567,
148
- "rewards/rejected": -0.000379929319024086,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
- "logits/chosen": -2.0377113819122314,
155
- "logits/rejected": -2.035726308822632,
156
- "logps/chosen": -32.14560317993164,
157
- "logps/rejected": -31.297948837280273,
158
- "loss": 0.6813,
159
- "rewards/accuracies": 0.6000000238418579,
160
- "rewards/chosen": 0.01904645375907421,
161
- "rewards/margins": 0.0253940187394619,
162
- "rewards/rejected": -0.006347564049065113,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
- "eval_logits/chosen": -2.231863498687744,
168
- "eval_logits/rejected": -2.227025270462036,
169
- "eval_logps/chosen": -34.02951431274414,
170
- "eval_logps/rejected": -37.534454345703125,
171
- "eval_loss": 0.6914929151535034,
172
- "eval_rewards/accuracies": 0.5220099687576294,
173
- "eval_rewards/chosen": 0.0010076783364638686,
174
- "eval_rewards/margins": 0.004574176389724016,
175
- "eval_rewards/rejected": -0.0035664979368448257,
176
- "eval_runtime": 145.7925,
177
  "eval_samples_per_second": 2.353,
178
  "eval_steps_per_second": 0.295,
179
  "step": 100
@@ -181,434 +181,434 @@
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
- "logits/chosen": -1.992661476135254,
185
- "logits/rejected": -1.9902756214141846,
186
- "logps/chosen": -33.13728713989258,
187
- "logps/rejected": -34.051490783691406,
188
- "loss": 0.6834,
189
- "rewards/accuracies": 0.6499999761581421,
190
- "rewards/chosen": 0.02130572497844696,
191
- "rewards/margins": 0.025729293003678322,
192
- "rewards/rejected": -0.004423565696924925,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
- "logits/chosen": -2.0037741661071777,
199
- "logits/rejected": -1.995439887046814,
200
- "logps/chosen": -32.33051681518555,
201
- "logps/rejected": -32.13713836669922,
202
- "loss": 0.6853,
203
- "rewards/accuracies": 0.5874999761581421,
204
- "rewards/chosen": 0.022878289222717285,
205
- "rewards/margins": 0.01800495944917202,
206
- "rewards/rejected": 0.004873327445238829,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
- "logits/chosen": -2.032257318496704,
213
- "logits/rejected": -2.024292230606079,
214
- "logps/chosen": -30.306774139404297,
215
- "logps/rejected": -32.081024169921875,
216
- "loss": 0.6754,
217
- "rewards/accuracies": 0.6499999761581421,
218
- "rewards/chosen": 0.03378837928175926,
219
- "rewards/margins": 0.03939158096909523,
220
- "rewards/rejected": -0.00560319609940052,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
- "logits/chosen": -1.9630043506622314,
227
- "logits/rejected": -1.973233938217163,
228
- "logps/chosen": -31.224035263061523,
229
- "logps/rejected": -32.56679916381836,
230
- "loss": 0.6718,
231
- "rewards/accuracies": 0.675000011920929,
232
- "rewards/chosen": 0.0391254760324955,
233
- "rewards/margins": 0.045508723706007004,
234
- "rewards/rejected": -0.006383246276527643,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
- "logits/chosen": -1.874066710472107,
241
- "logits/rejected": -1.8752658367156982,
242
- "logps/chosen": -33.90449905395508,
243
- "logps/rejected": -34.78742218017578,
244
- "loss": 0.6642,
245
  "rewards/accuracies": 0.6000000238418579,
246
- "rewards/chosen": 0.05763882398605347,
247
- "rewards/margins": 0.06367169320583344,
248
- "rewards/rejected": -0.006032869219779968,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
- "logits/chosen": -1.9251247644424438,
255
- "logits/rejected": -1.9217197895050049,
256
- "logps/chosen": -36.002262115478516,
257
- "logps/rejected": -32.70859909057617,
258
- "loss": 0.6788,
259
- "rewards/accuracies": 0.6000000238418579,
260
- "rewards/chosen": 0.03388362005352974,
261
- "rewards/margins": 0.031053191050887108,
262
- "rewards/rejected": 0.0028304329607635736,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
- "logits/chosen": -2.025562286376953,
269
- "logits/rejected": -2.018228530883789,
270
- "logps/chosen": -33.46672439575195,
271
- "logps/rejected": -31.409042358398438,
272
- "loss": 0.657,
273
- "rewards/accuracies": 0.699999988079071,
274
- "rewards/chosen": 0.06622789800167084,
275
- "rewards/margins": 0.07766623049974442,
276
- "rewards/rejected": -0.011438337154686451,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
- "logits/chosen": -2.0323550701141357,
283
- "logits/rejected": -2.037605047225952,
284
- "logps/chosen": -32.23218536376953,
285
- "logps/rejected": -32.439491271972656,
286
- "loss": 0.6682,
287
- "rewards/accuracies": 0.675000011920929,
288
- "rewards/chosen": 0.06252055615186691,
289
- "rewards/margins": 0.05339093878865242,
290
- "rewards/rejected": 0.009129621088504791,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
- "logits/chosen": -2.0334537029266357,
297
- "logits/rejected": -2.030702590942383,
298
- "logps/chosen": -31.266775131225586,
299
- "logps/rejected": -31.347997665405273,
300
- "loss": 0.67,
301
- "rewards/accuracies": 0.675000011920929,
302
- "rewards/chosen": 0.04443611577153206,
303
- "rewards/margins": 0.050393205136060715,
304
- "rewards/rejected": -0.005957084707915783,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
- "logits/chosen": -1.9037307500839233,
311
- "logits/rejected": -1.9083878993988037,
312
- "logps/chosen": -31.30868148803711,
313
- "logps/rejected": -32.823123931884766,
314
- "loss": 0.6632,
315
- "rewards/accuracies": 0.6875,
316
- "rewards/chosen": 0.05951983854174614,
317
- "rewards/margins": 0.0659579336643219,
318
- "rewards/rejected": -0.006438094191253185,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
- "eval_logits/chosen": -2.22880482673645,
324
- "eval_logits/rejected": -2.2239701747894287,
325
- "eval_logps/chosen": -34.05141067504883,
326
- "eval_logps/rejected": -37.58522033691406,
327
- "eval_loss": 0.6888246536254883,
328
- "eval_rewards/accuracies": 0.5660299062728882,
329
- "eval_rewards/chosen": -0.003371814964339137,
330
- "eval_rewards/margins": 0.01034836657345295,
331
- "eval_rewards/rejected": -0.013720180839300156,
332
- "eval_runtime": 145.665,
333
- "eval_samples_per_second": 2.355,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
- "logits/chosen": -2.016369342803955,
341
- "logits/rejected": -2.027027130126953,
342
- "logps/chosen": -31.75009536743164,
343
- "logps/rejected": -33.965911865234375,
344
- "loss": 0.6631,
345
- "rewards/accuracies": 0.6875,
346
- "rewards/chosen": 0.04424174129962921,
347
- "rewards/margins": 0.06488426774740219,
348
- "rewards/rejected": -0.020642530173063278,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
- "logits/chosen": -1.909157156944275,
355
- "logits/rejected": -1.9239362478256226,
356
- "logps/chosen": -29.8031005859375,
357
- "logps/rejected": -31.647253036499023,
358
- "loss": 0.6564,
359
  "rewards/accuracies": 0.7124999761581421,
360
- "rewards/chosen": 0.06168792396783829,
361
- "rewards/margins": 0.07950346171855927,
362
- "rewards/rejected": -0.017815548926591873,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
- "logits/chosen": -1.9656283855438232,
369
- "logits/rejected": -1.9696086645126343,
370
- "logps/chosen": -33.09147644042969,
371
- "logps/rejected": -31.6539306640625,
372
- "loss": 0.6543,
373
- "rewards/accuracies": 0.699999988079071,
374
- "rewards/chosen": 0.06519898772239685,
375
- "rewards/margins": 0.08612470328807831,
376
- "rewards/rejected": -0.02092570997774601,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
- "logits/chosen": -1.963721513748169,
383
- "logits/rejected": -1.9419059753417969,
384
- "logps/chosen": -33.84326934814453,
385
- "logps/rejected": -35.13850784301758,
386
- "loss": 0.6479,
387
- "rewards/accuracies": 0.737500011920929,
388
- "rewards/chosen": 0.06209472939372063,
389
- "rewards/margins": 0.0996372401714325,
390
- "rewards/rejected": -0.03754251450300217,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
- "logits/chosen": -2.004868268966675,
397
- "logits/rejected": -2.0015549659729004,
398
- "logps/chosen": -32.729896545410156,
399
- "logps/rejected": -36.27812957763672,
400
- "loss": 0.6706,
401
  "rewards/accuracies": 0.5874999761581421,
402
- "rewards/chosen": 0.03903444483876228,
403
- "rewards/margins": 0.04967557638883591,
404
- "rewards/rejected": -0.010641133412718773,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
- "logits/chosen": -1.8725497722625732,
411
- "logits/rejected": -1.8701589107513428,
412
- "logps/chosen": -33.99323654174805,
413
- "logps/rejected": -35.557159423828125,
414
- "loss": 0.6706,
415
- "rewards/accuracies": 0.699999988079071,
416
- "rewards/chosen": 0.039031852036714554,
417
- "rewards/margins": 0.04953894019126892,
418
- "rewards/rejected": -0.010507088154554367,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
- "logits/chosen": -1.8571383953094482,
425
- "logits/rejected": -1.854741096496582,
426
- "logps/chosen": -34.20746612548828,
427
- "logps/rejected": -31.83416748046875,
428
- "loss": 0.6708,
429
- "rewards/accuracies": 0.6000000238418579,
430
- "rewards/chosen": 0.0380266010761261,
431
- "rewards/margins": 0.05059366300702095,
432
- "rewards/rejected": -0.01256705541163683,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
- "logits/chosen": -1.9600715637207031,
439
- "logits/rejected": -1.9495933055877686,
440
- "logps/chosen": -34.99934387207031,
441
- "logps/rejected": -31.900096893310547,
442
- "loss": 0.6544,
443
- "rewards/accuracies": 0.7124999761581421,
444
- "rewards/chosen": 0.07145937532186508,
445
- "rewards/margins": 0.08328022807836533,
446
- "rewards/rejected": -0.011820845305919647,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
- "logits/chosen": -2.0559241771698,
453
- "logits/rejected": -2.04101300239563,
454
- "logps/chosen": -30.744888305664062,
455
- "logps/rejected": -32.62079620361328,
456
- "loss": 0.6809,
457
- "rewards/accuracies": 0.5375000238418579,
458
- "rewards/chosen": 0.035847391933202744,
459
- "rewards/margins": 0.029639026150107384,
460
- "rewards/rejected": 0.006208371836692095,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
- "logits/chosen": -1.9259971380233765,
467
- "logits/rejected": -1.9234883785247803,
468
- "logps/chosen": -32.35750961303711,
469
- "logps/rejected": -30.92331314086914,
470
- "loss": 0.6327,
471
- "rewards/accuracies": 0.7875000238418579,
472
- "rewards/chosen": 0.11392021179199219,
473
- "rewards/margins": 0.13522081077098846,
474
- "rewards/rejected": -0.021300604566931725,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
- "eval_logits/chosen": -2.2262163162231445,
480
- "eval_logits/rejected": -2.2213830947875977,
481
- "eval_logps/chosen": -34.093963623046875,
482
- "eval_logps/rejected": -37.60972213745117,
483
- "eval_loss": 0.6908519268035889,
484
- "eval_rewards/accuracies": 0.49501657485961914,
485
- "eval_rewards/chosen": -0.011881927028298378,
486
- "eval_rewards/margins": 0.006738076452165842,
487
- "eval_rewards/rejected": -0.018620004877448082,
488
- "eval_runtime": 145.4585,
489
- "eval_samples_per_second": 2.358,
490
- "eval_steps_per_second": 0.296,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
- "logits/chosen": -1.9112951755523682,
497
- "logits/rejected": -1.9080655574798584,
498
- "logps/chosen": -31.314626693725586,
499
- "logps/rejected": -33.85969543457031,
500
- "loss": 0.6566,
501
- "rewards/accuracies": 0.762499988079071,
502
- "rewards/chosen": 0.05774437263607979,
503
- "rewards/margins": 0.08125325292348862,
504
- "rewards/rejected": -0.023508887737989426,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
- "logits/chosen": -1.960738182067871,
511
- "logits/rejected": -1.9485470056533813,
512
- "logps/chosen": -34.32622528076172,
513
- "logps/rejected": -33.65083312988281,
514
- "loss": 0.6586,
515
- "rewards/accuracies": 0.637499988079071,
516
- "rewards/chosen": 0.0539703294634819,
517
- "rewards/margins": 0.0763682946562767,
518
- "rewards/rejected": -0.022397976368665695,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
- "logits/chosen": -1.9961421489715576,
525
- "logits/rejected": -1.9947240352630615,
526
- "logps/chosen": -33.134159088134766,
527
- "logps/rejected": -32.56267547607422,
528
- "loss": 0.6536,
529
- "rewards/accuracies": 0.762499988079071,
530
- "rewards/chosen": 0.07032694667577744,
531
- "rewards/margins": 0.08694546669721603,
532
- "rewards/rejected": -0.0166185162961483,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
- "logits/chosen": -2.083061933517456,
539
- "logits/rejected": -2.0673813819885254,
540
- "logps/chosen": -33.78687286376953,
541
- "logps/rejected": -33.12049102783203,
542
- "loss": 0.6568,
543
- "rewards/accuracies": 0.75,
544
- "rewards/chosen": 0.07850398868322372,
545
- "rewards/margins": 0.07899539172649384,
546
- "rewards/rejected": -0.0004914018791168928,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
- "logits/chosen": -1.9557991027832031,
553
- "logits/rejected": -1.9549639225006104,
554
- "logps/chosen": -32.84474563598633,
555
- "logps/rejected": -32.53619384765625,
556
- "loss": 0.648,
557
  "rewards/accuracies": 0.675000011920929,
558
- "rewards/chosen": 0.0874704122543335,
559
- "rewards/margins": 0.10167930275201797,
560
- "rewards/rejected": -0.014208881184458733,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
- "logits/chosen": -1.9108169078826904,
567
- "logits/rejected": -1.9210844039916992,
568
- "logps/chosen": -31.8836612701416,
569
- "logps/rejected": -35.36156463623047,
570
- "loss": 0.6572,
571
- "rewards/accuracies": 0.6875,
572
- "rewards/chosen": 0.06420587003231049,
573
- "rewards/margins": 0.07876714318990707,
574
- "rewards/rejected": -0.014561265707015991,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
- "logits/chosen": -2.0503978729248047,
581
- "logits/rejected": -2.043942928314209,
582
- "logps/chosen": -33.33507537841797,
583
- "logps/rejected": -29.28571128845215,
584
- "loss": 0.6582,
585
  "rewards/accuracies": 0.75,
586
- "rewards/chosen": 0.06337906420230865,
587
- "rewards/margins": 0.07523626834154129,
588
- "rewards/rejected": -0.011857211589813232,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
- "logits/chosen": -1.9104009866714478,
595
- "logits/rejected": -1.9126161336898804,
596
- "logps/chosen": -33.88220977783203,
597
- "logps/rejected": -30.987438201904297,
598
- "loss": 0.65,
599
- "rewards/accuracies": 0.7250000238418579,
600
- "rewards/chosen": 0.07136790454387665,
601
- "rewards/margins": 0.09433047473430634,
602
- "rewards/rejected": -0.02296256460249424,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
- "train_loss": 0.6689951481757226,
610
- "train_runtime": 3249.3574,
611
- "train_samples_per_second": 0.948,
612
  "train_steps_per_second": 0.118
613
  }
614
  ],
 
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
+ "loss": 0.5,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
 
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
+ "logits/chosen": -1.8665586709976196,
29
+ "logits/rejected": -1.8708692789077759,
30
+ "logps/chosen": -37.00250244140625,
31
+ "logps/rejected": -33.66969299316406,
32
+ "loss": 0.4985,
33
+ "rewards/accuracies": 0.5416666865348816,
34
+ "rewards/chosen": 0.0008193479152396321,
35
+ "rewards/margins": 0.008743342012166977,
36
+ "rewards/rejected": -0.007923995144665241,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
+ "logits/chosen": -1.9974254369735718,
43
+ "logits/rejected": -2.0000810623168945,
44
+ "logps/chosen": -29.634906768798828,
45
+ "logps/rejected": -29.027408599853516,
46
+ "loss": 0.5009,
47
+ "rewards/accuracies": 0.38749998807907104,
48
+ "rewards/chosen": 0.001457492122426629,
49
+ "rewards/margins": -0.005749998614192009,
50
+ "rewards/rejected": 0.007207490503787994,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
+ "logits/chosen": -1.9204790592193604,
57
+ "logits/rejected": -1.9177839756011963,
58
+ "logps/chosen": -31.412555694580078,
59
+ "logps/rejected": -33.24369812011719,
60
+ "loss": 0.4992,
61
+ "rewards/accuracies": 0.550000011920929,
62
+ "rewards/chosen": 0.0007082058000378311,
63
+ "rewards/margins": 0.005755766294896603,
64
+ "rewards/rejected": -0.005047560669481754,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
+ "logits/chosen": -2.0162367820739746,
71
+ "logits/rejected": -2.007521629333496,
72
+ "logps/chosen": -32.55222702026367,
73
+ "logps/rejected": -32.50428771972656,
74
+ "loss": 0.4992,
75
+ "rewards/accuracies": 0.5,
76
+ "rewards/chosen": 0.004915344063192606,
77
+ "rewards/margins": 0.0032389431726187468,
78
+ "rewards/rejected": 0.0016764007741585374,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
+ "logits/chosen": -1.8627817630767822,
85
+ "logits/rejected": -1.8519961833953857,
86
+ "logps/chosen": -33.52722930908203,
87
+ "logps/rejected": -35.41474151611328,
88
+ "loss": 0.5002,
89
+ "rewards/accuracies": 0.4749999940395355,
90
+ "rewards/chosen": 0.0060504418797791,
91
+ "rewards/margins": -0.001469915034249425,
92
+ "rewards/rejected": 0.007520356681197882,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
+ "logits/chosen": -1.9401956796646118,
99
+ "logits/rejected": -1.9421344995498657,
100
+ "logps/chosen": -32.53112030029297,
101
+ "logps/rejected": -33.1898307800293,
102
+ "loss": 0.4964,
103
+ "rewards/accuracies": 0.6000000238418579,
104
+ "rewards/chosen": 0.01381197851151228,
105
+ "rewards/margins": 0.01750265061855316,
106
+ "rewards/rejected": -0.00369067071005702,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
+ "logits/chosen": -2.071833610534668,
113
+ "logits/rejected": -2.076794147491455,
114
+ "logps/chosen": -33.93788528442383,
115
+ "logps/rejected": -36.575801849365234,
116
+ "loss": 0.4976,
117
+ "rewards/accuracies": 0.5874999761581421,
118
+ "rewards/chosen": 0.010120250284671783,
119
+ "rewards/margins": 0.011506280861794949,
120
+ "rewards/rejected": -0.0013860296458005905,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
+ "logits/chosen": -1.9325587749481201,
127
+ "logits/rejected": -1.9356613159179688,
128
+ "logps/chosen": -34.26636505126953,
129
+ "logps/rejected": -34.54140090942383,
130
+ "loss": 0.4947,
131
+ "rewards/accuracies": 0.550000011920929,
132
+ "rewards/chosen": 0.032289423048496246,
133
+ "rewards/margins": 0.022087663412094116,
134
+ "rewards/rejected": 0.01020175963640213,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
+ "logits/chosen": -1.942940354347229,
141
+ "logits/rejected": -1.9474563598632812,
142
+ "logps/chosen": -32.32842254638672,
143
+ "logps/rejected": -32.28204345703125,
144
+ "loss": 0.4965,
145
+ "rewards/accuracies": 0.574999988079071,
146
+ "rewards/chosen": 0.027799556031823158,
147
+ "rewards/margins": 0.012426125817000866,
148
+ "rewards/rejected": 0.015373429283499718,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
+ "logits/chosen": -2.0405964851379395,
155
+ "logits/rejected": -2.0386147499084473,
156
+ "logps/chosen": -32.07122039794922,
157
+ "logps/rejected": -31.215023040771484,
158
+ "loss": 0.4947,
159
+ "rewards/accuracies": 0.574999988079071,
160
+ "rewards/chosen": 0.033922821283340454,
161
+ "rewards/margins": 0.02368539571762085,
162
+ "rewards/rejected": 0.010237427428364754,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
+ "eval_logits/chosen": -2.2347991466522217,
168
+ "eval_logits/rejected": -2.229947805404663,
169
+ "eval_logps/chosen": -33.91511917114258,
170
+ "eval_logps/rejected": -37.412628173828125,
171
+ "eval_loss": 0.499397873878479,
172
+ "eval_rewards/accuracies": 0.5215947031974792,
173
+ "eval_rewards/chosen": 0.023886699229478836,
174
+ "eval_rewards/margins": 0.0030889539048075676,
175
+ "eval_rewards/rejected": 0.020797746255993843,
176
+ "eval_runtime": 145.7619,
177
  "eval_samples_per_second": 2.353,
178
  "eval_steps_per_second": 0.295,
179
  "step": 100
 
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
+ "logits/chosen": -1.9959571361541748,
185
+ "logits/rejected": -1.99361252784729,
186
+ "logps/chosen": -32.979209899902344,
187
+ "logps/rejected": -33.898841857910156,
188
+ "loss": 0.4925,
189
+ "rewards/accuracies": 0.612500011920929,
190
+ "rewards/chosen": 0.052921634167432785,
191
+ "rewards/margins": 0.026816055178642273,
192
+ "rewards/rejected": 0.026105573400855064,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
+ "logits/chosen": -2.007497787475586,
199
+ "logits/rejected": -1.9991832971572876,
200
+ "logps/chosen": -32.195396423339844,
201
+ "logps/rejected": -31.9866886138916,
202
+ "loss": 0.4967,
203
+ "rewards/accuracies": 0.5249999761581421,
204
+ "rewards/chosen": 0.04990261048078537,
205
+ "rewards/margins": 0.01493864320218563,
206
+ "rewards/rejected": 0.03496397286653519,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
+ "logits/chosen": -2.0352938175201416,
213
+ "logits/rejected": -2.027338743209839,
214
+ "logps/chosen": -30.16824722290039,
215
+ "logps/rejected": -31.9173526763916,
216
+ "loss": 0.4919,
217
+ "rewards/accuracies": 0.625,
218
+ "rewards/chosen": 0.061494071036577225,
219
+ "rewards/margins": 0.034362830221652985,
220
+ "rewards/rejected": 0.027131233364343643,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
+ "logits/chosen": -1.9656566381454468,
227
+ "logits/rejected": -1.9758468866348267,
228
+ "logps/chosen": -31.05475425720215,
229
+ "logps/rejected": -32.390235900878906,
230
+ "loss": 0.4893,
231
+ "rewards/accuracies": 0.637499988079071,
232
+ "rewards/chosen": 0.0729818269610405,
233
+ "rewards/margins": 0.04405337944626808,
234
+ "rewards/rejected": 0.028928453102707863,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
+ "logits/chosen": -1.877623200416565,
241
+ "logits/rejected": -1.8787848949432373,
242
+ "logps/chosen": -33.642906188964844,
243
+ "logps/rejected": -34.57593536376953,
244
+ "loss": 0.4826,
245
  "rewards/accuracies": 0.6000000238418579,
246
+ "rewards/chosen": 0.10995662212371826,
247
+ "rewards/margins": 0.07369254529476166,
248
+ "rewards/rejected": 0.0362640880048275,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
+ "logits/chosen": -1.9285871982574463,
255
+ "logits/rejected": -1.9251912832260132,
256
+ "logps/chosen": -35.79336929321289,
257
+ "logps/rejected": -32.49341583251953,
258
+ "loss": 0.4925,
259
+ "rewards/accuracies": 0.574999988079071,
260
+ "rewards/chosen": 0.07566188275814056,
261
+ "rewards/margins": 0.029793858528137207,
262
+ "rewards/rejected": 0.045868031680583954,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
+ "logits/chosen": -2.0295512676239014,
269
+ "logits/rejected": -2.0222458839416504,
270
+ "logps/chosen": -33.23942947387695,
271
+ "logps/rejected": -31.19607925415039,
272
+ "loss": 0.4806,
273
+ "rewards/accuracies": 0.675000011920929,
274
+ "rewards/chosen": 0.11168631166219711,
275
+ "rewards/margins": 0.08053232729434967,
276
+ "rewards/rejected": 0.0311539676040411,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
+ "logits/chosen": -2.0365030765533447,
283
+ "logits/rejected": -2.0416781902313232,
284
+ "logps/chosen": -31.986160278320312,
285
+ "logps/rejected": -32.17144012451172,
286
+ "loss": 0.4878,
287
+ "rewards/accuracies": 0.637499988079071,
288
+ "rewards/chosen": 0.11172527074813843,
289
+ "rewards/margins": 0.048985324800014496,
290
+ "rewards/rejected": 0.06273995339870453,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
+ "logits/chosen": -2.0367565155029297,
297
+ "logits/rejected": -2.0340359210968018,
298
+ "logps/chosen": -31.0958194732666,
299
+ "logps/rejected": -31.11456298828125,
300
+ "loss": 0.4905,
301
+ "rewards/accuracies": 0.637499988079071,
302
+ "rewards/chosen": 0.07862688601016998,
303
+ "rewards/margins": 0.03789640590548515,
304
+ "rewards/rejected": 0.04073048755526543,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
+ "logits/chosen": -1.90771484375,
311
+ "logits/rejected": -1.912388801574707,
312
+ "logps/chosen": -31.08389663696289,
313
+ "logps/rejected": -32.62942123413086,
314
+ "loss": 0.4825,
315
+ "rewards/accuracies": 0.699999988079071,
316
+ "rewards/chosen": 0.10447730123996735,
317
+ "rewards/margins": 0.07217548787593842,
318
+ "rewards/rejected": 0.032301802188158035,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
+ "eval_logits/chosen": -2.2322466373443604,
324
+ "eval_logits/rejected": -2.2274229526519775,
325
+ "eval_logps/chosen": -33.779205322265625,
326
+ "eval_logps/rejected": -37.31794357299805,
327
+ "eval_loss": 0.4973558187484741,
328
+ "eval_rewards/accuracies": 0.5544019937515259,
329
+ "eval_rewards/chosen": 0.05106903612613678,
330
+ "eval_rewards/margins": 0.011334729380905628,
331
+ "eval_rewards/rejected": 0.039734311401844025,
332
+ "eval_runtime": 145.5488,
333
+ "eval_samples_per_second": 2.357,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
+ "logits/chosen": -2.0204057693481445,
341
+ "logits/rejected": -2.0309972763061523,
342
+ "logps/chosen": -31.54058837890625,
343
+ "logps/rejected": -33.71467590332031,
344
+ "loss": 0.4864,
345
+ "rewards/accuracies": 0.6000000238418579,
346
+ "rewards/chosen": 0.08614270389080048,
347
+ "rewards/margins": 0.0565376803278923,
348
+ "rewards/rejected": 0.029605034738779068,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
+ "logits/chosen": -1.9130290746688843,
355
+ "logits/rejected": -1.9277244806289673,
356
+ "logps/chosen": -29.63765525817871,
357
+ "logps/rejected": -31.437763214111328,
358
+ "loss": 0.4831,
359
  "rewards/accuracies": 0.7124999761581421,
360
+ "rewards/chosen": 0.09477666765451431,
361
+ "rewards/margins": 0.07069384306669235,
362
+ "rewards/rejected": 0.02408282831311226,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
+ "logits/chosen": -1.9711300134658813,
369
+ "logits/rejected": -1.9751195907592773,
370
+ "logps/chosen": -32.86473846435547,
371
+ "logps/rejected": -31.40615463256836,
372
+ "loss": 0.4801,
373
+ "rewards/accuracies": 0.7124999761581421,
374
+ "rewards/chosen": 0.11054597795009613,
375
+ "rewards/margins": 0.0819164589047432,
376
+ "rewards/rejected": 0.02862953022122383,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
+ "logits/chosen": -1.9691890478134155,
383
+ "logits/rejected": -1.9474375247955322,
384
+ "logps/chosen": -33.62578201293945,
385
+ "logps/rejected": -34.88764953613281,
386
+ "loss": 0.4781,
387
+ "rewards/accuracies": 0.699999988079071,
388
+ "rewards/chosen": 0.10559363663196564,
389
+ "rewards/margins": 0.0929645225405693,
390
+ "rewards/rejected": 0.012629099190235138,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
+ "logits/chosen": -2.010312557220459,
397
+ "logits/rejected": -2.007035732269287,
398
+ "logps/chosen": -32.49589920043945,
399
+ "logps/rejected": -35.9986686706543,
400
+ "loss": 0.49,
401
  "rewards/accuracies": 0.5874999761581421,
402
+ "rewards/chosen": 0.08583381026983261,
403
+ "rewards/margins": 0.040582992136478424,
404
+ "rewards/rejected": 0.04525081440806389,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
+ "logits/chosen": -1.8778746128082275,
411
+ "logits/rejected": -1.8754326105117798,
412
+ "logps/chosen": -33.7269287109375,
413
+ "logps/rejected": -35.267066955566406,
414
+ "loss": 0.4893,
415
+ "rewards/accuracies": 0.574999988079071,
416
+ "rewards/chosen": 0.09229358285665512,
417
+ "rewards/margins": 0.044782862067222595,
418
+ "rewards/rejected": 0.047510724514722824,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
+ "logits/chosen": -1.8628604412078857,
425
+ "logits/rejected": -1.8603498935699463,
426
+ "logps/chosen": -33.917144775390625,
427
+ "logps/rejected": -31.5814151763916,
428
+ "loss": 0.4863,
429
+ "rewards/accuracies": 0.637499988079071,
430
+ "rewards/chosen": 0.09609059244394302,
431
+ "rewards/margins": 0.058107007294893265,
432
+ "rewards/rejected": 0.03798357769846916,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
+ "logits/chosen": -1.9663734436035156,
439
+ "logits/rejected": -1.9559385776519775,
440
+ "logps/chosen": -34.753868103027344,
441
+ "logps/rejected": -31.635868072509766,
442
+ "loss": 0.4806,
443
+ "rewards/accuracies": 0.625,
444
+ "rewards/chosen": 0.1205538734793663,
445
+ "rewards/margins": 0.07952861487865448,
446
+ "rewards/rejected": 0.04102526605129242,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
+ "logits/chosen": -2.062568187713623,
453
+ "logits/rejected": -2.047731399536133,
454
+ "logps/chosen": -30.416040420532227,
455
+ "logps/rejected": -32.359901428222656,
456
+ "loss": 0.4895,
457
+ "rewards/accuracies": 0.5874999761581421,
458
+ "rewards/chosen": 0.1016167551279068,
459
+ "rewards/margins": 0.04322956129908562,
460
+ "rewards/rejected": 0.05838719755411148,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
+ "logits/chosen": -1.93375563621521,
467
+ "logits/rejected": -1.9313066005706787,
468
+ "logps/chosen": -32.096317291259766,
469
+ "logps/rejected": -30.669086456298828,
470
+ "loss": 0.4669,
471
+ "rewards/accuracies": 0.7250000238418579,
472
+ "rewards/chosen": 0.16615837812423706,
473
+ "rewards/margins": 0.13661329448223114,
474
+ "rewards/rejected": 0.029545078054070473,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
+ "eval_logits/chosen": -2.231231927871704,
480
+ "eval_logits/rejected": -2.226423978805542,
481
+ "eval_logps/chosen": -33.768104553222656,
482
+ "eval_logps/rejected": -37.29201889038086,
483
+ "eval_loss": 0.49796417355537415,
484
+ "eval_rewards/accuracies": 0.5157807469367981,
485
+ "eval_rewards/chosen": 0.05328937619924545,
486
+ "eval_rewards/margins": 0.008368566632270813,
487
+ "eval_rewards/rejected": 0.04492080584168434,
488
+ "eval_runtime": 145.91,
489
+ "eval_samples_per_second": 2.351,
490
+ "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
+ "logits/chosen": -1.9169145822525024,
497
+ "logits/rejected": -1.9137403964996338,
498
+ "logps/chosen": -31.04461097717285,
499
+ "logps/rejected": -33.57860565185547,
500
+ "loss": 0.4809,
501
+ "rewards/accuracies": 0.75,
502
+ "rewards/chosen": 0.11174802482128143,
503
+ "rewards/margins": 0.07903869450092316,
504
+ "rewards/rejected": 0.03270933777093887,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
+ "logits/chosen": -1.9684025049209595,
511
+ "logits/rejected": -1.956284523010254,
512
+ "logps/chosen": -34.03736114501953,
513
+ "logps/rejected": -33.44590377807617,
514
+ "loss": 0.4778,
515
+ "rewards/accuracies": 0.762499988079071,
516
+ "rewards/chosen": 0.11174283176660538,
517
+ "rewards/margins": 0.09315498173236847,
518
+ "rewards/rejected": 0.018587838858366013,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
+ "logits/chosen": -2.003418445587158,
525
+ "logits/rejected": -2.0020651817321777,
526
+ "logps/chosen": -32.87415313720703,
527
+ "logps/rejected": -32.24354553222656,
528
+ "loss": 0.4816,
529
+ "rewards/accuracies": 0.675000011920929,
530
+ "rewards/chosen": 0.12232844531536102,
531
+ "rewards/margins": 0.07512133568525314,
532
+ "rewards/rejected": 0.04720713198184967,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
+ "logits/chosen": -2.090510845184326,
539
+ "logits/rejected": -2.0748770236968994,
540
+ "logps/chosen": -33.48200607299805,
541
+ "logps/rejected": -32.82493209838867,
542
+ "loss": 0.4807,
543
+ "rewards/accuracies": 0.699999988079071,
544
+ "rewards/chosen": 0.13947655260562897,
545
+ "rewards/margins": 0.0808553546667099,
546
+ "rewards/rejected": 0.05862119793891907,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
+ "logits/chosen": -1.9624559879302979,
553
+ "logits/rejected": -1.9616531133651733,
554
+ "logps/chosen": -32.56671142578125,
555
+ "logps/rejected": -32.222007751464844,
556
+ "loss": 0.4774,
557
  "rewards/accuracies": 0.675000011920929,
558
+ "rewards/chosen": 0.14307641983032227,
559
+ "rewards/margins": 0.09444761276245117,
560
+ "rewards/rejected": 0.04862881451845169,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
+ "logits/chosen": -1.9191839694976807,
567
+ "logits/rejected": -1.9294923543930054,
568
+ "logps/chosen": -31.610462188720703,
569
+ "logps/rejected": -35.02184295654297,
570
+ "loss": 0.4841,
571
+ "rewards/accuracies": 0.675000011920929,
572
+ "rewards/chosen": 0.11884615570306778,
573
+ "rewards/margins": 0.06546333432197571,
574
+ "rewards/rejected": 0.05338282510638237,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
+ "logits/chosen": -2.0576701164245605,
581
+ "logits/rejected": -2.051162004470825,
582
+ "logps/chosen": -33.00985336303711,
583
+ "logps/rejected": -28.984561920166016,
584
+ "loss": 0.4803,
585
  "rewards/accuracies": 0.75,
586
+ "rewards/chosen": 0.12842342257499695,
587
+ "rewards/margins": 0.08005058020353317,
588
+ "rewards/rejected": 0.04837283492088318,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
+ "logits/chosen": -1.9184468984603882,
595
+ "logits/rejected": -1.9206174612045288,
596
+ "logps/chosen": -33.628562927246094,
597
+ "logps/rejected": -30.75335121154785,
598
+ "loss": 0.4766,
599
+ "rewards/accuracies": 0.75,
600
+ "rewards/chosen": 0.12209834903478622,
601
+ "rewards/margins": 0.09824265539646149,
602
+ "rewards/rejected": 0.023855695500969887,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
+ "train_loss": 0.4876757522682091,
610
+ "train_runtime": 3250.1859,
611
+ "train_samples_per_second": 0.947,
612
  "train_steps_per_second": 0.118
613
  }
614
  ],