hugodk-sch committed on
Commit 3aeaeb8
1 Parent(s): 7a46457

Model save

Files changed (5)
  1. README.md +13 -16
  2. adapter_model.safetensors +1 -1
  3. all_results.json +2 -15
  4. train_results.json +2 -2
  5. trainer_state.json +376 -376
README.md CHANGED
@@ -1,13 +1,10 @@
 ---
 library_name: peft
 tags:
-- alignment-handbook
 - trl
 - dpo
 - generated_from_trainer
 base_model: NbAiLab/nb-gpt-j-6B-v2
-datasets:
-- hugodk-sch/aftonposten_title_prefs
 model-index:
 - name: aftonposten-6b-align-scan
   results: []
@@ -18,17 +15,17 @@ should probably proofread and complete it, then remove this comment. -->
 
 # aftonposten-6b-align-scan
 
-This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
+This model is a fine-tuned version of [NbAiLab/nb-gpt-j-6B-v2](https://huggingface.co/NbAiLab/nb-gpt-j-6B-v2) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.4913
-- Rewards/chosen: 0.1914
-- Rewards/rejected: 0.1556
-- Rewards/accuracies: 0.5453
-- Rewards/margins: 0.0358
-- Logps/rejected: -37.2944
-- Logps/chosen: -33.7611
-- Logits/rejected: -2.2272
-- Logits/chosen: -2.2320
+- Loss: 0.5843
+- Rewards/chosen: 0.0001
+- Rewards/rejected: -0.0131
+- Rewards/accuracies: 0.5278
+- Rewards/margins: 0.0132
+- Logps/rejected: -37.5353
+- Logps/chosen: -34.0344
+- Logits/rejected: -2.2322
+- Logits/chosen: -2.2371
 
 ## Model description
 
@@ -63,9 +60,9 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
 |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
-| 0.4799 | 0.26 | 100 | 0.4978 | 0.1217 | 0.1117 | 0.5341 | 0.0100 | -37.3570 | -33.8607 | -2.2333 | -2.2381 |
-| 0.4453 | 0.52 | 200 | 0.4928 | 0.1862 | 0.1561 | 0.5370 | 0.0302 | -37.2937 | -33.7685 | -2.2299 | -2.2347 |
-| 0.3947 | 0.78 | 300 | 0.4910 | 0.1956 | 0.1591 | 0.5565 | 0.0365 | -37.2894 | -33.7551 | -2.2274 | -2.2322 |
+| 0.4711 | 0.26 | 100 | 0.5755 | 0.0163 | 0.0131 | 0.5195 | 0.0032 | -37.4979 | -34.0113 | -2.2352 | -2.2401 |
+| 0.5061 | 0.52 | 200 | 0.5877 | -0.0108 | -0.0202 | 0.4992 | 0.0094 | -37.5455 | -34.0500 | -2.2337 | -2.2385 |
+| 0.3371 | 0.78 | 300 | 0.5843 | 0.0001 | -0.0131 | 0.5278 | 0.0132 | -37.5353 | -34.0344 | -2.2322 | -2.2371 |
 
 
 ### Framework versions
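
As a usage note (not part of the commit): the card names `peft` as the library and `NbAiLab/nb-gpt-j-6B-v2` as the base model, so the adapter can be loaded roughly as sketched below. The repo id `hugodk-sch/aftonposten-6b-align-scan` is an assumption inferred from the model name and the commit author; it is not stated in the diff.

```python
# Minimal sketch, assuming the adapter is published as
# "hugodk-sch/aftonposten-6b-align-scan" (inferred, not confirmed by the diff).
import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

adapter_id = "hugodk-sch/aftonposten-6b-align-scan"  # hypothetical repo id

# AutoPeftModelForCausalLM reads adapter_config.json, loads the base model
# (NbAiLab/nb-gpt-j-6B-v2 per the card) and attaches adapter_model.safetensors.
model = AutoPeftModelForCausalLM.from_pretrained(adapter_id, torch_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained("NbAiLab/nb-gpt-j-6B-v2")

inputs = tokenizer("Oslo er ", return_tensors="pt")
with torch.no_grad():
    output = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```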
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8820dc9f9b650b645197e8167eedbab8af35175306fc4608b15579e49aa98e89
+oid sha256:b6e3e8f071db6cc19f55ac20735e7ca5976169e76cd665df27f42360dea5c708
 size 176183216
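
The changed file is a Git LFS pointer, not the weights themselves: the `oid sha256:` line is the SHA-256 digest of the 176,183,216-byte adapter blob. A small sketch, assuming the real `adapter_model.safetensors` has been downloaded locally, of checking it against the new pointer:

```python
# Sketch: verify a locally downloaded adapter_model.safetensors against the
# updated LFS pointer above. The local path is hypothetical.
import hashlib
from pathlib import Path

EXPECTED_OID = "b6e3e8f071db6cc19f55ac20735e7ca5976169e76cd665df27f42360dea5c708"
EXPECTED_SIZE = 176183216

path = Path("adapter_model.safetensors")
data = path.read_bytes()

assert len(data) == EXPECTED_SIZE, "size does not match the LFS pointer"
assert hashlib.sha256(data).hexdigest() == EXPECTED_OID, "sha256 does not match the LFS pointer"
print("local adapter file matches the new LFS pointer")
```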
all_results.json CHANGED
@@ -1,20 +1,7 @@
 {
     "epoch": 1.0,
-    "eval_logits/chosen": -2.2319846153259277,
-    "eval_logits/rejected": -2.227167844772339,
-    "eval_logps/chosen": -33.76113510131836,
-    "eval_logps/rejected": -37.29436492919922,
-    "eval_loss": 0.49125906825065613,
-    "eval_rewards/accuracies": 0.545265793800354,
-    "eval_rewards/chosen": 0.19139321148395538,
-    "eval_rewards/margins": 0.03581271693110466,
-    "eval_rewards/rejected": 0.15558050572872162,
-    "eval_runtime": 145.6689,
-    "eval_samples": 343,
-    "eval_samples_per_second": 2.355,
-    "eval_steps_per_second": 0.295,
-    "train_loss": 0.45851393303313814,
-    "train_runtime": 3252.0771,
+    "train_loss": 0.46899499707407766,
+    "train_runtime": 3249.86,
     "train_samples": 3079,
     "train_samples_per_second": 0.947,
     "train_steps_per_second": 0.118
train_results.json CHANGED
@@ -1,7 +1,7 @@
 {
     "epoch": 1.0,
-    "train_loss": 0.45851393303313814,
-    "train_runtime": 3252.0771,
+    "train_loss": 0.46899499707407766,
+    "train_runtime": 3249.86,
     "train_samples": 3079,
     "train_samples_per_second": 0.947,
     "train_steps_per_second": 0.118
trainer_state.json CHANGED
@@ -15,7 +15,7 @@
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
- "loss": 0.5,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
@@ -25,589 +25,589 @@
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
- "logits/chosen": -1.8667322397232056,
29
- "logits/rejected": -1.8710299730300903,
30
- "logps/chosen": -36.98433303833008,
31
- "logps/rejected": -33.65640640258789,
32
- "loss": 0.4948,
33
  "rewards/accuracies": 0.5277777910232544,
34
- "rewards/chosen": 0.015585740096867085,
35
- "rewards/margins": 0.034019216895103455,
36
- "rewards/rejected": -0.018433474004268646,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
- "logits/chosen": -1.9972655773162842,
43
- "logits/rejected": -1.9999065399169922,
44
- "logps/chosen": -29.6416072845459,
45
- "logps/rejected": -29.046981811523438,
46
- "loss": 0.5013,
47
  "rewards/accuracies": 0.5,
48
- "rewards/chosen": 0.00041136163054034114,
49
- "rewards/margins": -0.011113673448562622,
50
- "rewards/rejected": 0.011525033973157406,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
- "logits/chosen": -1.9205191135406494,
57
- "logits/rejected": -1.9178133010864258,
58
- "logps/chosen": -31.385412216186523,
59
- "logps/rejected": -33.23365020751953,
60
- "loss": 0.4942,
61
- "rewards/accuracies": 0.5375000238418579,
62
- "rewards/chosen": 0.02147640287876129,
63
- "rewards/margins": 0.03211076930165291,
64
- "rewards/rejected": -0.010634368285536766,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
- "logits/chosen": -2.017784833908081,
71
- "logits/rejected": -2.009059429168701,
72
- "logps/chosen": -32.547088623046875,
73
- "logps/rejected": -32.473243713378906,
74
- "loss": 0.5024,
75
- "rewards/accuracies": 0.4749999940395355,
76
- "rewards/chosen": 0.020800117403268814,
77
- "rewards/margins": -0.006797379348427057,
78
- "rewards/rejected": 0.027597496286034584,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
- "logits/chosen": -1.863996148109436,
85
- "logits/rejected": -1.8532311916351318,
86
- "logps/chosen": -33.490657806396484,
87
- "logps/rejected": -35.415367126464844,
88
- "loss": 0.4957,
89
- "rewards/accuracies": 0.550000011920929,
90
- "rewards/chosen": 0.046776313334703445,
91
- "rewards/margins": 0.02089320495724678,
92
- "rewards/rejected": 0.025883108377456665,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
- "logits/chosen": -1.9439821243286133,
99
- "logits/rejected": -1.9459091424942017,
100
- "logps/chosen": -32.50756072998047,
101
- "logps/rejected": -33.15720748901367,
102
- "loss": 0.489,
103
- "rewards/accuracies": 0.550000011920929,
104
- "rewards/chosen": 0.06483479589223862,
105
- "rewards/margins": 0.05491489917039871,
106
- "rewards/rejected": 0.009919902309775352,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
- "logits/chosen": -2.075462579727173,
113
- "logits/rejected": -2.0804481506347656,
114
- "logps/chosen": -33.86588668823242,
115
- "logps/rejected": -36.501365661621094,
116
- "loss": 0.491,
117
- "rewards/accuracies": 0.5249999761581421,
118
- "rewards/chosen": 0.08581922948360443,
119
- "rewards/margins": 0.038563162088394165,
120
- "rewards/rejected": 0.04725607484579086,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
- "logits/chosen": -1.9374949932098389,
127
- "logits/rejected": -1.9405946731567383,
128
- "logps/chosen": -34.200557708740234,
129
- "logps/rejected": -34.543209075927734,
130
- "loss": 0.4719,
131
- "rewards/accuracies": 0.574999988079071,
132
- "rewards/chosen": 0.15907494723796844,
133
- "rewards/margins": 0.1246359720826149,
134
- "rewards/rejected": 0.03443896025419235,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
- "logits/chosen": -1.9453551769256592,
141
- "logits/rejected": -1.9498746395111084,
142
- "logps/chosen": -32.2825927734375,
143
- "logps/rejected": -32.25676727294922,
144
- "loss": 0.4858,
145
- "rewards/accuracies": 0.637499988079071,
146
- "rewards/chosen": 0.12937995791435242,
147
- "rewards/margins": 0.05788033455610275,
148
- "rewards/rejected": 0.07149962335824966,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
- "logits/chosen": -2.04329776763916,
155
- "logits/rejected": -2.0412919521331787,
156
- "logps/chosen": -32.015525817871094,
157
- "logps/rejected": -31.169103622436523,
158
- "loss": 0.4799,
159
- "rewards/accuracies": 0.637499988079071,
160
- "rewards/chosen": 0.15771625936031342,
161
- "rewards/margins": 0.08974156528711319,
162
- "rewards/rejected": 0.06797470152378082,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
- "eval_logits/chosen": -2.238145351409912,
168
- "eval_logits/rejected": -2.2333030700683594,
169
- "eval_logps/chosen": -33.86069107055664,
170
- "eval_logps/rejected": -37.356990814208984,
171
- "eval_loss": 0.49784979224205017,
172
- "eval_rewards/accuracies": 0.5340532064437866,
173
- "eval_rewards/chosen": 0.12170296907424927,
174
- "eval_rewards/margins": 0.009964452125132084,
175
- "eval_rewards/rejected": 0.11173851788043976,
176
- "eval_runtime": 145.9039,
177
- "eval_samples_per_second": 2.351,
178
  "eval_steps_per_second": 0.295,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
- "logits/chosen": -1.9996016025543213,
185
- "logits/rejected": -1.9972642660140991,
186
- "logps/chosen": -32.95330810546875,
187
- "logps/rejected": -33.88172912597656,
188
- "loss": 0.4742,
189
- "rewards/accuracies": 0.612500011920929,
190
- "rewards/chosen": 0.20335713028907776,
191
- "rewards/margins": 0.10000836849212646,
192
- "rewards/rejected": 0.10334876924753189,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
- "logits/chosen": -2.0096044540405273,
199
- "logits/rejected": -2.0012660026550293,
200
- "logps/chosen": -32.153419494628906,
201
- "logps/rejected": -31.961782455444336,
202
- "loss": 0.4848,
203
- "rewards/accuracies": 0.574999988079071,
204
- "rewards/chosen": 0.20403961837291718,
205
- "rewards/margins": 0.06423385441303253,
206
- "rewards/rejected": 0.13980577886104584,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
- "logits/chosen": -2.038412570953369,
213
- "logits/rejected": -2.0304617881774902,
214
- "logps/chosen": -30.165945053100586,
215
- "logps/rejected": -31.880346298217773,
216
- "loss": 0.4787,
217
- "rewards/accuracies": 0.5625,
218
- "rewards/chosen": 0.21683910489082336,
219
- "rewards/margins": 0.0959777683019638,
220
- "rewards/rejected": 0.12086131423711777,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
- "logits/chosen": -1.9683482646942139,
227
- "logits/rejected": -1.9784959554672241,
228
- "logps/chosen": -31.041671752929688,
229
- "logps/rejected": -32.388893127441406,
230
- "loss": 0.4621,
231
- "rewards/accuracies": 0.6625000238418579,
232
- "rewards/chosen": 0.264592707157135,
233
- "rewards/margins": 0.16240297257900238,
234
- "rewards/rejected": 0.10218973457813263,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
- "logits/chosen": -1.8799880743026733,
241
- "logits/rejected": -1.8811382055282593,
242
- "logps/chosen": -33.686256408691406,
243
- "logps/rejected": -34.564491271972656,
244
- "loss": 0.4503,
245
- "rewards/accuracies": 0.6000000238418579,
246
- "rewards/chosen": 0.3545047640800476,
247
- "rewards/margins": 0.21957096457481384,
248
- "rewards/rejected": 0.134933739900589,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
- "logits/chosen": -1.931627869606018,
255
- "logits/rejected": -1.9282808303833008,
256
- "logps/chosen": -35.77287292480469,
257
- "logps/rejected": -32.49742889404297,
258
- "loss": 0.4707,
259
- "rewards/accuracies": 0.637499988079071,
260
- "rewards/chosen": 0.27916350960731506,
261
- "rewards/margins": 0.12143567949533463,
262
- "rewards/rejected": 0.15772780776023865,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
- "logits/chosen": -2.0332443714141846,
269
- "logits/rejected": -2.0259475708007812,
270
- "logps/chosen": -33.251670837402344,
271
- "logps/rejected": -31.17826271057129,
272
- "loss": 0.439,
273
- "rewards/accuracies": 0.675000011920929,
274
- "rewards/chosen": 0.382335364818573,
275
- "rewards/margins": 0.2608257234096527,
276
- "rewards/rejected": 0.1215096265077591,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
- "logits/chosen": -2.040196657180786,
283
- "logits/rejected": -2.045389413833618,
284
- "logps/chosen": -31.95781898498535,
285
- "logps/rejected": -32.18687057495117,
286
- "loss": 0.4516,
287
- "rewards/accuracies": 0.699999988079071,
288
- "rewards/chosen": 0.4108788073062897,
289
- "rewards/margins": 0.20208871364593506,
290
- "rewards/rejected": 0.20879006385803223,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
- "logits/chosen": -2.040775775909424,
297
- "logits/rejected": -2.038043737411499,
298
- "logps/chosen": -31.056228637695312,
299
- "logps/rejected": -31.10147476196289,
300
- "loss": 0.4647,
301
- "rewards/accuracies": 0.675000011920929,
302
- "rewards/chosen": 0.30290743708610535,
303
- "rewards/margins": 0.1511872559785843,
304
- "rewards/rejected": 0.15172019600868225,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
- "logits/chosen": -1.9112863540649414,
311
- "logits/rejected": -1.9159523248672485,
312
- "logps/chosen": -31.08163070678711,
313
- "logps/rejected": -32.598487854003906,
314
- "loss": 0.4453,
315
- "rewards/accuracies": 0.6625000238418579,
316
- "rewards/chosen": 0.3672560155391693,
317
- "rewards/margins": 0.2325470894575119,
318
- "rewards/rejected": 0.1347089260816574,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
- "eval_logits/chosen": -2.234708309173584,
324
- "eval_logits/rejected": -2.229902505874634,
325
- "eval_logps/chosen": -33.76850891113281,
326
- "eval_logps/rejected": -37.29365921020508,
327
- "eval_loss": 0.49275800585746765,
328
- "eval_rewards/accuracies": 0.5369601845741272,
329
- "eval_rewards/chosen": 0.18622951209545135,
330
- "eval_rewards/margins": 0.030155813321471214,
331
- "eval_rewards/rejected": 0.1560736745595932,
332
- "eval_runtime": 145.5287,
333
- "eval_samples_per_second": 2.357,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
- "logits/chosen": -2.022939443588257,
341
- "logits/rejected": -2.033546209335327,
342
- "logps/chosen": -31.5457706451416,
343
- "logps/rejected": -33.67595672607422,
344
- "loss": 0.4628,
345
- "rewards/accuracies": 0.574999988079071,
346
- "rewards/chosen": 0.2978750169277191,
347
- "rewards/margins": 0.16715146601200104,
348
- "rewards/rejected": 0.13072356581687927,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
- "logits/chosen": -1.9158331155776978,
355
- "logits/rejected": -1.9305419921875,
356
- "logps/chosen": -29.57406997680664,
357
- "logps/rejected": -31.42013168334961,
358
- "loss": 0.4363,
359
- "rewards/accuracies": 0.699999988079071,
360
- "rewards/chosen": 0.37622833251953125,
361
- "rewards/margins": 0.2795966863632202,
362
- "rewards/rejected": 0.09663165360689163,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
- "logits/chosen": -1.9733517169952393,
369
- "logits/rejected": -1.9773433208465576,
370
- "logps/chosen": -32.83649444580078,
371
- "logps/rejected": -31.40326499938965,
372
- "loss": 0.4311,
373
- "rewards/accuracies": 0.699999988079071,
374
- "rewards/chosen": 0.40668267011642456,
375
- "rewards/margins": 0.304456502199173,
376
- "rewards/rejected": 0.10222617536783218,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
- "logits/chosen": -1.9713729619979858,
383
- "logits/rejected": -1.9496545791625977,
384
- "logps/chosen": -33.5602912902832,
385
- "logps/rejected": -34.85026550292969,
386
- "loss": 0.4227,
387
- "rewards/accuracies": 0.75,
388
- "rewards/chosen": 0.41541916131973267,
389
- "rewards/margins": 0.3450462222099304,
390
- "rewards/rejected": 0.07037289440631866,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
- "logits/chosen": -2.012058734893799,
397
- "logits/rejected": -2.0087602138519287,
398
- "logps/chosen": -32.46921157836914,
399
- "logps/rejected": -36.012245178222656,
400
- "loss": 0.4613,
401
  "rewards/accuracies": 0.637499988079071,
402
- "rewards/chosen": 0.31910061836242676,
403
- "rewards/margins": 0.17022350430488586,
404
- "rewards/rejected": 0.1488770991563797,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
- "logits/chosen": -1.878584861755371,
411
- "logits/rejected": -1.876153588294983,
412
- "logps/chosen": -33.71690368652344,
413
- "logps/rejected": -35.26210403442383,
414
- "loss": 0.4631,
415
- "rewards/accuracies": 0.6499999761581421,
416
- "rewards/chosen": 0.33004888892173767,
417
- "rewards/margins": 0.1602855622768402,
418
- "rewards/rejected": 0.16976332664489746,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
- "logits/chosen": -1.863705039024353,
425
- "logits/rejected": -1.8611875772476196,
426
- "logps/chosen": -33.94118881225586,
427
- "logps/rejected": -31.579448699951172,
428
- "loss": 0.4582,
429
- "rewards/accuracies": 0.612500011920929,
430
- "rewards/chosen": 0.3194878101348877,
431
- "rewards/margins": 0.18517015874385834,
432
- "rewards/rejected": 0.13431766629219055,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
- "logits/chosen": -1.96773362159729,
439
- "logits/rejected": -1.957313895225525,
440
- "logps/chosen": -34.72475814819336,
441
- "logps/rejected": -31.634963989257812,
442
- "loss": 0.4297,
443
- "rewards/accuracies": 0.75,
444
- "rewards/chosen": 0.4423200190067291,
445
- "rewards/margins": 0.298100084066391,
446
- "rewards/rejected": 0.14421990513801575,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
- "logits/chosen": -2.0632526874542236,
453
- "logits/rejected": -2.0484511852264404,
454
- "logps/chosen": -30.412399291992188,
455
- "logps/rejected": -32.339237213134766,
456
- "loss": 0.4682,
457
- "rewards/accuracies": 0.550000011920929,
458
- "rewards/chosen": 0.3582096993923187,
459
- "rewards/margins": 0.13939058780670166,
460
- "rewards/rejected": 0.21881911158561707,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
- "logits/chosen": -1.9345197677612305,
467
- "logits/rejected": -1.9320701360702515,
468
- "logps/chosen": -32.11776351928711,
469
- "logps/rejected": -30.692516326904297,
470
- "loss": 0.3947,
471
- "rewards/accuracies": 0.7250000238418579,
472
- "rewards/chosen": 0.5665432810783386,
473
- "rewards/margins": 0.4795381426811218,
474
- "rewards/rejected": 0.08700509369373322,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
- "eval_logits/chosen": -2.2321932315826416,
480
- "eval_logits/rejected": -2.227381944656372,
481
- "eval_logps/chosen": -33.75507736206055,
482
- "eval_logps/rejected": -37.28935623168945,
483
- "eval_loss": 0.49099090695381165,
484
- "eval_rewards/accuracies": 0.5564784407615662,
485
- "eval_rewards/chosen": 0.1956319361925125,
486
- "eval_rewards/margins": 0.03654861077666283,
487
- "eval_rewards/rejected": 0.15908333659172058,
488
- "eval_runtime": 145.678,
489
- "eval_samples_per_second": 2.355,
490
- "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
- "logits/chosen": -1.918630838394165,
497
- "logits/rejected": -1.9154895544052124,
498
- "logps/chosen": -31.033864974975586,
499
- "logps/rejected": -33.57470703125,
500
- "loss": 0.4357,
501
- "rewards/accuracies": 0.7749999761581421,
502
- "rewards/chosen": 0.3986373543739319,
503
- "rewards/margins": 0.28142648935317993,
504
- "rewards/rejected": 0.11721093952655792,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
- "logits/chosen": -1.9683278799057007,
511
- "logits/rejected": -1.9561859369277954,
512
- "logps/chosen": -34.02568817138672,
513
- "logps/rejected": -33.45148849487305,
514
- "loss": 0.4237,
515
- "rewards/accuracies": 0.7749999761581421,
516
- "rewards/chosen": 0.39926695823669434,
517
- "rewards/margins": 0.3381231129169464,
518
- "rewards/rejected": 0.06114383786916733,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
- "logits/chosen": -2.0041966438293457,
525
- "logits/rejected": -2.0028293132781982,
526
- "logps/chosen": -32.882896423339844,
527
- "logps/rejected": -32.24889373779297,
528
- "loss": 0.4411,
529
- "rewards/accuracies": 0.637499988079071,
530
- "rewards/chosen": 0.4220260679721832,
531
- "rewards/margins": 0.26054397225379944,
532
- "rewards/rejected": 0.16148212552070618,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
- "logits/chosen": -2.0908923149108887,
539
- "logits/rejected": -2.0752511024475098,
540
- "logps/chosen": -33.49097442626953,
541
- "logps/rejected": -32.81463623046875,
542
- "loss": 0.4381,
543
- "rewards/accuracies": 0.75,
544
- "rewards/chosen": 0.4818909764289856,
545
- "rewards/margins": 0.26951199769973755,
546
- "rewards/rejected": 0.21237893402576447,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
- "logits/chosen": -1.9633783102035522,
553
- "logits/rejected": -1.9625694751739502,
554
- "logps/chosen": -32.55985641479492,
555
- "logps/rejected": -32.24340057373047,
556
- "loss": 0.4211,
557
- "rewards/accuracies": 0.699999988079071,
558
- "rewards/chosen": 0.5055657029151917,
559
- "rewards/margins": 0.3503406047821045,
560
- "rewards/rejected": 0.15522508323192596,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
- "logits/chosen": -1.9187549352645874,
567
- "logits/rejected": -1.9290361404418945,
568
- "logps/chosen": -31.61210060119629,
569
- "logps/rejected": -35.0199089050293,
570
- "loss": 0.4489,
571
- "rewards/accuracies": 0.637499988079071,
572
- "rewards/chosen": 0.41481319069862366,
573
- "rewards/margins": 0.22661848366260529,
574
- "rewards/rejected": 0.18819470703601837,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
- "logits/chosen": -2.0583438873291016,
581
- "logits/rejected": -2.051846742630005,
582
- "logps/chosen": -33.05150604248047,
583
- "logps/rejected": -28.9710750579834,
584
- "loss": 0.4429,
585
- "rewards/accuracies": 0.75,
586
- "rewards/chosen": 0.42032259702682495,
587
- "rewards/margins": 0.2415778934955597,
588
- "rewards/rejected": 0.17874471843242645,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
- "logits/chosen": -1.9180545806884766,
595
- "logits/rejected": -1.92022705078125,
596
- "logps/chosen": -33.62676239013672,
597
- "logps/rejected": -30.719894409179688,
598
- "loss": 0.4281,
599
- "rewards/accuracies": 0.7124999761581421,
600
- "rewards/chosen": 0.4286006987094879,
601
- "rewards/margins": 0.3216874301433563,
602
- "rewards/rejected": 0.10691330581903458,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
- "train_loss": 0.45851393303313814,
610
- "train_runtime": 3252.0771,
611
  "train_samples_per_second": 0.947,
612
  "train_steps_per_second": 0.118
613
  }
 
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
+ "loss": 0.5102,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
 
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
+ "logits/chosen": -1.866772174835205,
29
+ "logits/rejected": -1.8710780143737793,
30
+ "logps/chosen": -36.99943161010742,
31
+ "logps/rejected": -33.657470703125,
32
+ "loss": 0.4858,
33
  "rewards/accuracies": 0.5277777910232544,
34
+ "rewards/chosen": 0.005016062408685684,
35
+ "rewards/margins": 0.02419126406311989,
36
+ "rewards/rejected": -0.019175197929143906,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
+ "logits/chosen": -1.9975645542144775,
43
+ "logits/rejected": -2.0002217292785645,
44
+ "logps/chosen": -29.651592254638672,
45
+ "logps/rejected": -29.066898345947266,
46
+ "loss": 0.5542,
47
  "rewards/accuracies": 0.5,
48
+ "rewards/chosen": -0.006579822860658169,
49
+ "rewards/margins": -0.004162783268839121,
50
+ "rewards/rejected": -0.0024170405231416225,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
+ "logits/chosen": -1.9212992191314697,
57
+ "logits/rejected": -1.918600082397461,
58
+ "logps/chosen": -31.42719078063965,
59
+ "logps/rejected": -33.23390579223633,
60
+ "loss": 0.5492,
61
+ "rewards/accuracies": 0.5249999761581421,
62
+ "rewards/chosen": -0.00776728754863143,
63
+ "rewards/margins": 0.0030429032631218433,
64
+ "rewards/rejected": -0.010810190811753273,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
+ "logits/chosen": -2.0178468227386475,
71
+ "logits/rejected": -2.009111166000366,
72
+ "logps/chosen": -32.579471588134766,
73
+ "logps/rejected": -32.50147247314453,
74
+ "loss": 0.5878,
75
+ "rewards/accuracies": 0.44999998807907104,
76
+ "rewards/chosen": -0.001865685684606433,
77
+ "rewards/margins": -0.009705344215035439,
78
+ "rewards/rejected": 0.007839656434953213,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
+ "logits/chosen": -1.8617979288101196,
85
+ "logits/rejected": -1.8510282039642334,
86
+ "logps/chosen": -33.562862396240234,
87
+ "logps/rejected": -35.4472770690918,
88
+ "loss": 0.5775,
89
+ "rewards/accuracies": 0.42500001192092896,
90
+ "rewards/chosen": -0.0037666684947907925,
91
+ "rewards/margins": -0.007314275950193405,
92
+ "rewards/rejected": 0.0035476074554026127,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
+ "logits/chosen": -1.9399009943008423,
99
+ "logits/rejected": -1.9418485164642334,
100
+ "logps/chosen": -32.5726203918457,
101
+ "logps/rejected": -33.18496322631836,
102
+ "loss": 0.5967,
103
+ "rewards/accuracies": 0.5249999761581421,
104
+ "rewards/chosen": 0.019294817000627518,
105
+ "rewards/margins": 0.028803948312997818,
106
+ "rewards/rejected": -0.009509134106338024,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
+ "logits/chosen": -2.0729470252990723,
113
+ "logits/rejected": -2.077918529510498,
114
+ "logps/chosen": -33.93268585205078,
115
+ "logps/rejected": -36.637229919433594,
116
+ "loss": 0.5297,
117
+ "rewards/accuracies": 0.5874999761581421,
118
+ "rewards/chosen": 0.039063483476638794,
119
+ "rewards/margins": 0.08691467344760895,
120
+ "rewards/rejected": -0.04785118252038956,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
+ "logits/chosen": -1.9366486072540283,
127
+ "logits/rejected": -1.939780592918396,
128
+ "logps/chosen": -34.30228805541992,
129
+ "logps/rejected": -34.652069091796875,
130
+ "loss": 0.4209,
131
+ "rewards/accuracies": 0.637499988079071,
132
+ "rewards/chosen": 0.08786438405513763,
133
+ "rewards/margins": 0.12962770462036133,
134
+ "rewards/rejected": -0.041763320565223694,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
+ "logits/chosen": -1.946979284286499,
141
+ "logits/rejected": -1.951521635055542,
142
+ "logps/chosen": -32.4210205078125,
143
+ "logps/rejected": -32.337799072265625,
144
+ "loss": 0.6191,
145
+ "rewards/accuracies": 0.5375000238418579,
146
+ "rewards/chosen": 0.03248247504234314,
147
+ "rewards/margins": 0.017702888697385788,
148
+ "rewards/rejected": 0.014779585413634777,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
+ "logits/chosen": -2.045017719268799,
155
+ "logits/rejected": -2.043015241622925,
156
+ "logps/chosen": -32.16361618041992,
157
+ "logps/rejected": -31.273128509521484,
158
+ "loss": 0.4711,
159
+ "rewards/accuracies": 0.5874999761581421,
160
+ "rewards/chosen": 0.05405203625559807,
161
+ "rewards/margins": 0.05889582633972168,
162
+ "rewards/rejected": -0.004843792412430048,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
+ "eval_logits/chosen": -2.240097761154175,
168
+ "eval_logits/rejected": -2.2352383136749268,
169
+ "eval_logps/chosen": -34.01129150390625,
170
+ "eval_logps/rejected": -37.49793243408203,
171
+ "eval_loss": 0.5754798054695129,
172
+ "eval_rewards/accuracies": 0.5195183157920837,
173
+ "eval_rewards/chosen": 0.016279777511954308,
174
+ "eval_rewards/margins": 0.0031977586913853884,
175
+ "eval_rewards/rejected": 0.013082021847367287,
176
+ "eval_runtime": 145.9275,
177
+ "eval_samples_per_second": 2.35,
178
  "eval_steps_per_second": 0.295,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
+ "logits/chosen": -2.0017762184143066,
185
+ "logits/rejected": -1.9993689060211182,
186
+ "logps/chosen": -33.13564682006836,
187
+ "logps/rejected": -33.997642517089844,
188
+ "loss": 0.6759,
189
+ "rewards/accuracies": 0.625,
190
+ "rewards/chosen": 0.07571890205144882,
191
+ "rewards/margins": 0.05350743606686592,
192
+ "rewards/rejected": 0.022211460396647453,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
+ "logits/chosen": -2.013158082962036,
199
+ "logits/rejected": -2.004814624786377,
200
+ "logps/chosen": -32.32666015625,
201
+ "logps/rejected": -32.12488555908203,
202
+ "loss": 0.5572,
203
+ "rewards/accuracies": 0.550000011920929,
204
+ "rewards/chosen": 0.08277235180139542,
205
+ "rewards/margins": 0.05714136362075806,
206
+ "rewards/rejected": 0.02563098631799221,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
+ "logits/chosen": -2.039836883544922,
213
+ "logits/rejected": -2.031825542449951,
214
+ "logps/chosen": -30.349987030029297,
215
+ "logps/rejected": -32.029136657714844,
216
+ "loss": 0.5109,
217
+ "rewards/accuracies": 0.574999988079071,
218
+ "rewards/chosen": 0.0880090743303299,
219
+ "rewards/margins": 0.07130132615566254,
220
+ "rewards/rejected": 0.016707751899957657,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
+ "logits/chosen": -1.9694139957427979,
227
+ "logits/rejected": -1.9796711206436157,
228
+ "logps/chosen": -31.24441909790039,
229
+ "logps/rejected": -32.56440353393555,
230
+ "loss": 0.5037,
231
+ "rewards/accuracies": 0.6499999761581421,
232
+ "rewards/chosen": 0.12266886234283447,
233
+ "rewards/margins": 0.14333835244178772,
234
+ "rewards/rejected": -0.02066950313746929,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
+ "logits/chosen": -1.8828538656234741,
241
+ "logits/rejected": -1.8840004205703735,
242
+ "logps/chosen": -34.006561279296875,
243
+ "logps/rejected": -34.81753921508789,
244
+ "loss": 0.4564,
245
+ "rewards/accuracies": 0.6625000238418579,
246
+ "rewards/chosen": 0.13028928637504578,
247
+ "rewards/margins": 0.17248663306236267,
248
+ "rewards/rejected": -0.04219735041260719,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
+ "logits/chosen": -1.9349613189697266,
255
+ "logits/rejected": -1.9315464496612549,
256
+ "logps/chosen": -36.059181213378906,
257
+ "logps/rejected": -32.72267150878906,
258
+ "loss": 0.487,
259
+ "rewards/accuracies": 0.6625000238418579,
260
+ "rewards/chosen": 0.07874591648578644,
261
+ "rewards/margins": 0.07869003713130951,
262
+ "rewards/rejected": 5.587190389633179e-05,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
+ "logits/chosen": -2.036032199859619,
269
+ "logits/rejected": -2.028651714324951,
270
+ "logps/chosen": -33.573509216308594,
271
+ "logps/rejected": -31.36956214904785,
272
+ "loss": 0.4465,
273
+ "rewards/accuracies": 0.6875,
274
+ "rewards/chosen": 0.15704451501369476,
275
+ "rewards/margins": 0.16944527626037598,
276
+ "rewards/rejected": -0.012400749139487743,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
+ "logits/chosen": -2.042617082595825,
283
+ "logits/rejected": -2.0478556156158447,
284
+ "logps/chosen": -32.33074188232422,
285
+ "logps/rejected": -32.472496032714844,
286
+ "loss": 0.3727,
287
+ "rewards/accuracies": 0.7250000238418579,
288
+ "rewards/chosen": 0.14983226358890533,
289
+ "rewards/margins": 0.14098060131072998,
290
+ "rewards/rejected": 0.008851657621562481,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
+ "logits/chosen": -2.0445618629455566,
297
+ "logits/rejected": -2.0417864322662354,
298
+ "logps/chosen": -31.317855834960938,
299
+ "logps/rejected": -31.348400115966797,
300
+ "loss": 0.4609,
301
+ "rewards/accuracies": 0.7250000238418579,
302
+ "rewards/chosen": 0.11976919323205948,
303
+ "rewards/margins": 0.14090058207511902,
304
+ "rewards/rejected": -0.021131375804543495,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
+ "logits/chosen": -1.9139589071273804,
311
+ "logits/rejected": -1.9186115264892578,
312
+ "logps/chosen": -31.420101165771484,
313
+ "logps/rejected": -32.79216766357422,
314
+ "loss": 0.5061,
315
+ "rewards/accuracies": 0.675000011920929,
316
+ "rewards/chosen": 0.13032536208629608,
317
+ "rewards/margins": 0.13119173049926758,
318
+ "rewards/rejected": -0.0008663811022415757,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
+ "eval_logits/chosen": -2.2385354042053223,
324
+ "eval_logits/rejected": -2.2336885929107666,
325
+ "eval_logps/chosen": -34.05002975463867,
326
+ "eval_logps/rejected": -37.54545974731445,
327
+ "eval_loss": 0.5877167582511902,
328
+ "eval_rewards/accuracies": 0.4991694688796997,
329
+ "eval_rewards/chosen": -0.010832725092768669,
330
+ "eval_rewards/margins": 0.009353035129606724,
331
+ "eval_rewards/rejected": -0.020185761153697968,
332
+ "eval_runtime": 145.6958,
333
+ "eval_samples_per_second": 2.354,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
+ "logits/chosen": -2.026780366897583,
341
+ "logits/rejected": -2.0374534130096436,
342
+ "logps/chosen": -31.786846160888672,
343
+ "logps/rejected": -33.922447204589844,
344
+ "loss": 0.4225,
345
+ "rewards/accuracies": 0.675000011920929,
346
+ "rewards/chosen": 0.12911871075630188,
347
+ "rewards/margins": 0.17094141244888306,
348
+ "rewards/rejected": -0.041822709143161774,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
+ "logits/chosen": -1.9182548522949219,
355
+ "logits/rejected": -1.933038353919983,
356
+ "logps/chosen": -29.936147689819336,
357
+ "logps/rejected": -31.559993743896484,
358
+ "loss": 0.4429,
359
+ "rewards/accuracies": 0.7124999761581421,
360
+ "rewards/chosen": 0.12277615070343018,
361
+ "rewards/margins": 0.12404797971248627,
362
+ "rewards/rejected": -0.0012718416983261704,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
+ "logits/chosen": -1.9737575054168701,
369
+ "logits/rejected": -1.9777438640594482,
370
+ "logps/chosen": -33.20977783203125,
371
+ "logps/rejected": -31.5734920501709,
372
+ "loss": 0.3768,
373
+ "rewards/accuracies": 0.637499988079071,
374
+ "rewards/chosen": 0.14538228511810303,
375
+ "rewards/margins": 0.16231416165828705,
376
+ "rewards/rejected": -0.016931891441345215,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
+ "logits/chosen": -1.9738047122955322,
383
+ "logits/rejected": -1.9518985748291016,
384
+ "logps/chosen": -33.94301223754883,
385
+ "logps/rejected": -35.023075103759766,
386
+ "loss": 0.4304,
387
+ "rewards/accuracies": 0.7250000238418579,
388
+ "rewards/chosen": 0.1475195735692978,
389
+ "rewards/margins": 0.19811663031578064,
390
+ "rewards/rejected": -0.05059707164764404,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
+ "logits/chosen": -2.0155911445617676,
397
+ "logits/rejected": -2.0122878551483154,
398
+ "logps/chosen": -32.76276397705078,
399
+ "logps/rejected": -36.25225830078125,
400
+ "loss": 0.4155,
401
  "rewards/accuracies": 0.637499988079071,
402
+ "rewards/chosen": 0.11361332982778549,
403
+ "rewards/margins": 0.1327473670244217,
404
+ "rewards/rejected": -0.019134048372507095,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
+ "logits/chosen": -1.8830368518829346,
411
+ "logits/rejected": -1.880629539489746,
412
+ "logps/chosen": -34.01710891723633,
413
+ "logps/rejected": -35.53314971923828,
414
+ "loss": 0.4219,
415
+ "rewards/accuracies": 0.7250000238418579,
416
+ "rewards/chosen": 0.11990444362163544,
417
+ "rewards/margins": 0.1398761123418808,
418
+ "rewards/rejected": -0.019971664994955063,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
+ "logits/chosen": -1.8685131072998047,
425
+ "logits/rejected": -1.8660367727279663,
426
+ "logps/chosen": -34.2363395690918,
427
+ "logps/rejected": -31.768321990966797,
428
+ "loss": 0.4918,
429
+ "rewards/accuracies": 0.675000011920929,
430
+ "rewards/chosen": 0.11288031190633774,
431
+ "rewards/margins": 0.11077453196048737,
432
+ "rewards/rejected": 0.0021057710982859135,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
+ "logits/chosen": -1.9722087383270264,
439
+ "logits/rejected": -1.961612343788147,
440
+ "logps/chosen": -35.04926300048828,
441
+ "logps/rejected": -31.826486587524414,
442
+ "loss": 0.3417,
443
+ "rewards/accuracies": 0.737500011920929,
444
+ "rewards/chosen": 0.21516695618629456,
445
+ "rewards/margins": 0.20501188933849335,
446
+ "rewards/rejected": 0.010155050083994865,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
+ "logits/chosen": -2.068380832672119,
453
+ "logits/rejected": -2.05340313911438,
454
+ "logps/chosen": -30.748706817626953,
455
+ "logps/rejected": -32.63469314575195,
456
+ "loss": 0.4741,
457
+ "rewards/accuracies": 0.625,
458
+ "rewards/chosen": 0.12279413640499115,
459
+ "rewards/margins": 0.11079368740320206,
460
+ "rewards/rejected": 0.012000440619885921,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
+ "logits/chosen": -1.9397351741790771,
467
+ "logits/rejected": -1.9371974468231201,
468
+ "logps/chosen": -32.603660583496094,
469
+ "logps/rejected": -30.863941192626953,
470
+ "loss": 0.3371,
471
+ "rewards/accuracies": 0.7749999761581421,
472
+ "rewards/chosen": 0.2264125794172287,
473
+ "rewards/margins": 0.25940603017807007,
474
+ "rewards/rejected": -0.03299345448613167,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
+ "eval_logits/chosen": -2.2370686531066895,
480
+ "eval_logits/rejected": -2.2322022914886475,
481
+ "eval_logps/chosen": -34.03439712524414,
482
+ "eval_logps/rejected": -37.53529357910156,
483
+ "eval_loss": 0.5842657089233398,
484
+ "eval_rewards/accuracies": 0.5278239250183105,
485
+ "eval_rewards/chosen": 0.00010742468293756247,
486
+ "eval_rewards/margins": 0.013178782537579536,
487
+ "eval_rewards/rejected": -0.013071359135210514,
488
+ "eval_runtime": 145.5131,
489
+ "eval_samples_per_second": 2.357,
490
+ "eval_steps_per_second": 0.296,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
+ "logits/chosen": -1.9219080209732056,
497
+ "logits/rejected": -1.9186557531356812,
498
+ "logps/chosen": -31.35263442993164,
499
+ "logps/rejected": -33.746925354003906,
500
+ "loss": 0.4182,
501
+ "rewards/accuracies": 0.75,
502
+ "rewards/chosen": 0.17550265789031982,
503
+ "rewards/margins": 0.17884370684623718,
504
+ "rewards/rejected": -0.0033410280011594296,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
+ "logits/chosen": -1.9732515811920166,
511
+ "logits/rejected": -1.9609721899032593,
512
+ "logps/chosen": -34.435001373291016,
513
+ "logps/rejected": -33.593482971191406,
514
+ "loss": 0.3778,
515
+ "rewards/accuracies": 0.675000011920929,
516
+ "rewards/chosen": 0.11274988949298859,
517
+ "rewards/margins": 0.15099774301052094,
518
+ "rewards/rejected": -0.03824785351753235,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
+ "logits/chosen": -2.0085272789001465,
525
+ "logits/rejected": -2.0070912837982178,
526
+ "logps/chosen": -33.31962585449219,
527
+ "logps/rejected": -32.51466751098633,
528
+ "loss": 0.4061,
529
+ "rewards/accuracies": 0.675000011920929,
530
+ "rewards/chosen": 0.1163158044219017,
531
+ "rewards/margins": 0.14087431132793427,
532
+ "rewards/rejected": -0.024558518081903458,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
+ "logits/chosen": -2.0951385498046875,
539
+ "logits/rejected": -2.0793802738189697,
540
+ "logps/chosen": -33.88261032104492,
541
+ "logps/rejected": -33.10671615600586,
542
+ "loss": 0.4042,
543
+ "rewards/accuracies": 0.737500011920929,
544
+ "rewards/chosen": 0.20774619281291962,
545
+ "rewards/margins": 0.19982238113880157,
546
+ "rewards/rejected": 0.00792380329221487,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
+ "logits/chosen": -1.9677631855010986,
553
+ "logits/rejected": -1.9668546915054321,
554
+ "logps/chosen": -32.957977294921875,
555
+ "logps/rejected": -32.483890533447266,
556
+ "loss": 0.4874,
557
+ "rewards/accuracies": 0.75,
558
+ "rewards/chosen": 0.2268829345703125,
559
+ "rewards/margins": 0.2400023490190506,
560
+ "rewards/rejected": -0.013119394890964031,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
+ "logits/chosen": -1.9235498905181885,
567
+ "logits/rejected": -1.933876395225525,
568
+ "logps/chosen": -32.016578674316406,
569
+ "logps/rejected": -35.32080841064453,
570
+ "loss": 0.4297,
571
+ "rewards/accuracies": 0.699999988079071,
572
+ "rewards/chosen": 0.13167943060398102,
573
+ "rewards/margins": 0.15411263704299927,
574
+ "rewards/rejected": -0.02243318222463131,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
+ "logits/chosen": -2.062828540802002,
581
+ "logits/rejected": -2.056312322616577,
582
+ "logps/chosen": -33.43348693847656,
583
+ "logps/rejected": -29.213970184326172,
584
+ "loss": 0.3963,
585
+ "rewards/accuracies": 0.6875,
586
+ "rewards/chosen": 0.15293903648853302,
587
+ "rewards/margins": 0.14422088861465454,
588
+ "rewards/rejected": 0.008718125522136688,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
+ "logits/chosen": -1.9224169254302979,
595
+ "logits/rejected": -1.9245796203613281,
596
+ "logps/chosen": -33.9437255859375,
597
+ "logps/rejected": -30.860538482666016,
598
+ "loss": 0.4119,
599
+ "rewards/accuracies": 0.762499988079071,
600
+ "rewards/chosen": 0.20672598481178284,
601
+ "rewards/margins": 0.19826285541057587,
602
+ "rewards/rejected": 0.008463130332529545,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
+ "train_loss": 0.46899499707407766,
610
+ "train_runtime": 3249.86,
611
  "train_samples_per_second": 0.947,
612
  "train_steps_per_second": 0.118
613
  }
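
As context for the logged metrics: with TRL's DPO trainer, `rewards/chosen` and `rewards/rejected` are beta-scaled log-probability ratios of the policy against the reference model, and `rewards/margins` is their difference. The beta and `loss_type` used for this run are not recorded in this diff, so only the margin identity is checked here, against the step-300 eval entry above:

```python
# rewards/margins should equal rewards/chosen - rewards/rejected (up to float rounding).
# Values copied from the step-300 eval block in trainer_state.json above.
chosen = 0.00010742468293756247
rejected = -0.013071359135210514
logged_margin = 0.013178782537579536

assert abs((chosen - rejected) - logged_margin) < 1e-6
print(chosen - rejected)  # ~0.0132, matching eval_rewards/margins
```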