hugodk-sch committed on
Commit 9a23c87
1 Parent(s): 3165139

Model save

Files changed (5)
  1. README.md +12 -15
  2. adapter_model.safetensors +1 -1
  3. all_results.json +3 -16
  4. train_results.json +3 -3
  5. trainer_state.json +378 -378
README.md CHANGED
@@ -1,13 +1,10 @@
 ---
 library_name: peft
 tags:
- - alignment-handbook
 - trl
 - dpo
 - generated_from_trainer
 base_model: NbAiLab/nb-gpt-j-6B-v2
- datasets:
- - hugodk-sch/aftonposten_title_prefs
 model-index:
 - name: aftonposten-6b-align-scan
   results: []
@@ -18,17 +15,17 @@ should probably proofread and complete it, then remove this comment. -->

 # aftonposten-6b-align-scan

- This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
+ This model is a fine-tuned version of [NbAiLab/nb-gpt-j-6B-v2](https://huggingface.co/NbAiLab/nb-gpt-j-6B-v2) on an unknown dataset.
 It achieves the following results on the evaluation set:
- - Loss: 0.3447
- - Rewards/chosen: 0.0141
- - Rewards/rejected: 0.0084
- - Rewards/accuracies: 0.5162
- - Rewards/margins: 0.0057
- - Logps/rejected: -37.5073
- - Logps/chosen: -34.0189
+ - Loss: 1.0000
+ - Rewards/chosen: 0.0001
+ - Rewards/rejected: 0.0001
+ - Rewards/accuracies: 0.4988
+ - Rewards/margins: 0.0000
+ - Logps/rejected: -37.5053
+ - Logps/chosen: -34.0201
 - Logits/rejected: -2.2388
- - Logits/chosen: -2.2437
+ - Logits/chosen: -2.2436

 ## Model description

@@ -63,9 +60,9 @@ The following hyperparameters were used during training:

 | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
 |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
- | 0.333 | 0.26 | 100 | 0.3405 | 0.0126 | 0.0118 | 0.5104 | 0.0008 | -37.5035 | -34.0205 | -2.2389 | -2.2438 |
- | 0.3306 | 0.52 | 200 | 0.3466 | 0.0054 | 0.0048 | 0.5021 | 0.0007 | -37.5113 | -34.0285 | -2.2386 | -2.2435 |
- | 0.3327 | 0.78 | 300 | 0.3276 | 0.0152 | 0.0022 | 0.5365 | 0.0131 | -37.5142 | -34.0176 | -2.2390 | -2.2439 |
+ | 1.0001 | 0.26 | 100 | 1.0000 | 0.0000 | 0.0001 | 0.5100 | -0.0000 | -37.5080 | -34.0296 | -2.2392 | -2.2441 |
+ | 0.9998 | 0.52 | 200 | 1.0000 | 0.0001 | 0.0001 | 0.5046 | -0.0000 | -37.5040 | -34.0221 | -2.2392 | -2.2440 |
+ | 0.9997 | 0.78 | 300 | 1.0000 | 0.0001 | 0.0001 | 0.4988 | 0.0000 | -37.5053 | -34.0201 | -2.2388 | -2.2436 |


 ### Framework versions
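
Since the card above only names the PEFT library and the base model, here is a minimal, untested sketch of how an adapter like this one is typically loaded. The repository id `hugodk-sch/aftonposten-6b-align-scan` is an assumption inferred from the model name and committer, not something stated in this commit.

```python
# Minimal sketch, not from the card. Assumptions: the adapter is hosted at
# "hugodk-sch/aftonposten-6b-align-scan" (inferred, may differ) and is a PEFT
# adapter on top of the base model listed in the metadata above.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "NbAiLab/nb-gpt-j-6B-v2"                   # base_model from the card
adapter_id = "hugodk-sch/aftonposten-6b-align-scan"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto", torch_dtype="auto")
model = PeftModel.from_pretrained(base, adapter_id)  # applies adapter_model.safetensors

prompt = "Oslo i dag:"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=30)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```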
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:fbd37e038d13d252dc8fbbff90393b02ad5b1a3acf3aaf4a91b929c037ba71a7
+ oid sha256:10d3f20ad7c4aec2449ad1b4fd6692acb1c515aef929e5d69011238fb991cc39
 size 176183216
all_results.json CHANGED
@@ -1,21 +1,8 @@
 {
 "epoch": 1.0,
- "eval_logits/chosen": -2.2436861991882324,
- "eval_logits/rejected": -2.2388129234313965,
- "eval_logps/chosen": -34.01886749267578,
- "eval_logps/rejected": -37.50727081298828,
- "eval_loss": 0.34474730491638184,
- "eval_rewards/accuracies": 0.5161960124969482,
- "eval_rewards/chosen": 0.01411630492657423,
- "eval_rewards/margins": 0.005696756765246391,
- "eval_rewards/rejected": 0.008419547230005264,
- "eval_runtime": 145.8183,
- "eval_samples": 343,
- "eval_samples_per_second": 2.352,
- "eval_steps_per_second": 0.295,
- "train_loss": 0.3306902726749321,
- "train_runtime": 3256.1932,
+ "train_loss": 0.9998476387618425,
+ "train_runtime": 3251.6099,
 "train_samples": 3079,
- "train_samples_per_second": 0.946,
+ "train_samples_per_second": 0.947,
 "train_steps_per_second": 0.118
 }
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
 "epoch": 1.0,
- "train_loss": 0.3306902726749321,
- "train_runtime": 3256.1932,
+ "train_loss": 0.9998476387618425,
+ "train_runtime": 3251.6099,
 "train_samples": 3079,
- "train_samples_per_second": 0.946,
+ "train_samples_per_second": 0.947,
 "train_steps_per_second": 0.118
 }
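
The trainer_state.json diff below logs the same per-step DPO metrics (`logps/*`, `rewards/*`, `loss`). As a reading aid, and only to the best of my understanding of TRL's DPOTrainer (nothing in this commit states it), the `rewards/*` fields are typically derived from the policy and reference log-probabilities roughly as sketched here; `beta` and all values are illustrative placeholders, not numbers from this run.

```python
import torch

# Hedged sketch of how TRL's DPOTrainer commonly derives the logged "rewards/*"
# fields; beta and the log-prob tensors are illustrative placeholders only.
beta = 0.1
policy_chosen_logps = torch.tensor([-34.02])
policy_rejected_logps = torch.tensor([-37.51])
reference_chosen_logps = torch.tensor([-34.02])
reference_rejected_logps = torch.tensor([-37.50])

chosen_rewards = beta * (policy_chosen_logps - reference_chosen_logps)        # "rewards/chosen"
rejected_rewards = beta * (policy_rejected_logps - reference_rejected_logps)  # "rewards/rejected"
margins = chosen_rewards - rejected_rewards                                   # "rewards/margins"
accuracies = (chosen_rewards > rejected_rewards).float().mean()               # "rewards/accuracies"
print(margins.item(), accuracies.item())
```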
trainer_state.json CHANGED
@@ -15,7 +15,7 @@
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
- "loss": 0.3086,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
@@ -25,590 +25,590 @@
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.2820512820512818e-07,
28
- "logits/chosen": -1.866453766822815,
29
- "logits/rejected": -1.8707627058029175,
30
- "logps/chosen": -36.98646545410156,
31
- "logps/rejected": -33.6445426940918,
32
- "loss": 0.3098,
33
- "rewards/accuracies": 0.4444444477558136,
34
- "rewards/chosen": 0.018122652545571327,
35
- "rewards/margins": 0.031144678592681885,
36
- "rewards/rejected": -0.013022023253142834,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.5641025641025636e-07,
42
- "logits/chosen": -1.9970738887786865,
43
- "logits/rejected": -1.9997297525405884,
44
- "logps/chosen": -29.651851654052734,
45
- "logps/rejected": -29.048221588134766,
46
- "loss": 0.3674,
47
- "rewards/accuracies": 0.4375,
48
- "rewards/chosen": -0.008690872229635715,
49
- "rewards/margins": -0.02239001728594303,
50
- "rewards/rejected": 0.01369914598762989,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.8461538461538463e-07,
56
- "logits/chosen": -1.919904351234436,
57
- "logits/rejected": -1.9172093868255615,
58
- "logps/chosen": -31.407363891601562,
59
- "logps/rejected": -33.22914505004883,
60
- "loss": 0.3289,
61
- "rewards/accuracies": 0.5874999761581421,
62
- "rewards/chosen": 0.007855755276978016,
63
- "rewards/margins": 0.017470674589276314,
64
- "rewards/rejected": -0.009614917449653149,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438433e-07,
70
- "logits/chosen": -2.016488552093506,
71
- "logits/rejected": -2.0077414512634277,
72
- "logps/chosen": -32.588050842285156,
73
- "logps/rejected": -32.51618576049805,
74
- "loss": 0.3526,
75
- "rewards/accuracies": 0.4749999940395355,
76
- "rewards/chosen": -0.010120267979800701,
77
- "rewards/margins": -0.006958496756851673,
78
- "rewards/rejected": -0.0031617730855941772,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542186e-07,
84
- "logits/chosen": -1.8645082712173462,
85
- "logits/rejected": -1.8537276983261108,
86
- "logps/chosen": -33.5635871887207,
87
- "logps/rejected": -35.42169952392578,
88
- "loss": 0.3858,
89
- "rewards/accuracies": 0.38749998807907104,
90
- "rewards/chosen": -0.005497059319168329,
91
- "rewards/margins": -0.03307711333036423,
92
- "rewards/rejected": 0.027580058202147484,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941118e-07,
98
- "logits/chosen": -1.9458835124969482,
99
- "logits/rejected": -1.9478065967559814,
100
- "logps/chosen": -32.57460021972656,
101
- "logps/rejected": -33.195518493652344,
102
- "loss": 0.3015,
103
- "rewards/accuracies": 0.574999988079071,
104
- "rewards/chosen": 0.02302517369389534,
105
- "rewards/margins": 0.04474987834692001,
106
- "rewards/rejected": -0.021724706515669823,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413548e-07,
112
- "logits/chosen": -2.0793328285217285,
113
- "logits/rejected": -2.084324836730957,
114
- "logps/chosen": -33.99799728393555,
115
- "logps/rejected": -36.57799530029297,
116
- "loss": 0.3658,
117
- "rewards/accuracies": 0.48750001192092896,
118
- "rewards/chosen": -0.00855917576700449,
119
- "rewards/margins": -0.00035296304849907756,
120
- "rewards/rejected": -0.008206211030483246,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-07,
126
- "logits/chosen": -1.9421542882919312,
127
- "logits/rejected": -1.9453080892562866,
128
- "logps/chosen": -34.38970184326172,
129
- "logps/rejected": -34.58138656616211,
130
- "loss": 0.3181,
131
- "rewards/accuracies": 0.5874999761581421,
132
- "rewards/chosen": 0.034300822764635086,
133
- "rewards/margins": 0.024381538853049278,
134
- "rewards/rejected": 0.009919276461005211,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.736716601303429e-07,
140
- "logits/chosen": -1.9507806301116943,
141
- "logits/rejected": -1.955293893814087,
142
- "logps/chosen": -32.461936950683594,
143
- "logps/rejected": -32.3816032409668,
144
- "loss": 0.3094,
145
- "rewards/accuracies": 0.574999988079071,
146
- "rewards/chosen": 0.004934861324727535,
147
- "rewards/margins": 0.02535998821258545,
148
- "rewards/rejected": -0.02042512409389019,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.62624545834521e-07,
154
- "logits/chosen": -2.0485286712646484,
155
- "logits/rejected": -2.046539783477783,
156
- "logps/chosen": -32.23391342163086,
157
- "logps/rejected": -31.265544891357422,
158
- "loss": 0.333,
159
- "rewards/accuracies": 0.512499988079071,
160
- "rewards/chosen": 0.006228716112673283,
161
- "rewards/margins": 0.005629505030810833,
162
- "rewards/rejected": 0.0005992107326164842,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
- "eval_logits/chosen": -2.243820905685425,
168
- "eval_logits/rejected": -2.238938570022583,
169
- "eval_logps/chosen": -34.02050018310547,
170
- "eval_logps/rejected": -37.50347137451172,
171
- "eval_loss": 0.3404548168182373,
172
- "eval_rewards/accuracies": 0.5103820562362671,
173
- "eval_rewards/chosen": 0.012649022974073887,
174
- "eval_rewards/margins": 0.0008147989865392447,
175
- "eval_rewards/rejected": 0.011834223754703999,
176
- "eval_runtime": 146.2563,
177
- "eval_samples_per_second": 2.345,
178
- "eval_steps_per_second": 0.294,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.4982572012636904e-07,
184
- "logits/chosen": -2.0055313110351562,
185
- "logits/rejected": -2.00311541557312,
186
- "logps/chosen": -33.23518753051758,
187
- "logps/rejected": -34.03091812133789,
188
- "loss": 0.3233,
189
- "rewards/accuracies": 0.48750001192092896,
190
- "rewards/chosen": 0.007764431647956371,
191
- "rewards/margins": 0.009159245528280735,
192
- "rewards/rejected": -0.0013948131818324327,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777677e-07,
198
- "logits/chosen": -2.016556978225708,
199
- "logits/rejected": -2.0081825256347656,
200
- "logps/chosen": -32.435211181640625,
201
- "logps/rejected": -32.18501281738281,
202
- "loss": 0.3121,
203
- "rewards/accuracies": 0.5625,
204
- "rewards/chosen": 0.00872363243252039,
205
- "rewards/margins": 0.029877260327339172,
206
- "rewards/rejected": -0.02115362510085106,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.194082707715275e-07,
212
- "logits/chosen": -2.0461266040802,
213
- "logits/rejected": -2.03806734085083,
214
- "logps/chosen": -30.502710342407227,
215
- "logps/rejected": -32.0546760559082,
216
- "loss": 0.3674,
217
- "rewards/accuracies": 0.4625000059604645,
218
- "rewards/chosen": -0.02429451420903206,
219
- "rewards/margins": -0.02279272861778736,
220
- "rewards/rejected": -0.0015017850091680884,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.020402418666621e-07,
226
- "logits/chosen": -1.9767634868621826,
227
- "logits/rejected": -1.9870494604110718,
228
- "logps/chosen": -31.402328491210938,
229
- "logps/rejected": -32.55817794799805,
230
- "loss": 0.3125,
231
- "rewards/accuracies": 0.550000011920929,
232
- "rewards/chosen": 0.015601096674799919,
233
- "rewards/margins": 0.03657008707523346,
234
- "rewards/rejected": -0.02096899226307869,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.8341962650351185e-07,
240
- "logits/chosen": -1.8908436298370361,
241
- "logits/rejected": -1.8919413089752197,
242
- "logps/chosen": -34.207401275634766,
243
- "logps/rejected": -34.783695220947266,
244
- "loss": 0.3373,
245
- "rewards/accuracies": 0.5375000238418579,
246
- "rewards/chosen": -0.013235817663371563,
247
- "rewards/margins": 0.01055879332125187,
248
- "rewards/rejected": -0.023794615641236305,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800572e-07,
254
- "logits/chosen": -1.942582368850708,
255
- "logits/rejected": -1.9391053915023804,
256
- "logps/chosen": -36.15548324584961,
257
- "logps/rejected": -32.729759216308594,
258
- "loss": 0.3207,
259
- "rewards/accuracies": 0.5,
260
- "rewards/chosen": 0.014574882574379444,
261
- "rewards/margins": 0.020880455151200294,
262
- "rewards/rejected": -0.006305573042482138,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.430433172111807e-07,
268
- "logits/chosen": -2.042206287384033,
269
- "logits/rejected": -2.034825086593628,
270
- "logps/chosen": -33.77631378173828,
271
- "logps/rejected": -31.35869789123535,
272
- "loss": 0.3201,
273
- "rewards/accuracies": 0.5625,
274
- "rewards/chosen": 0.019394155591726303,
275
- "rewards/margins": 0.025556519627571106,
276
- "rewards/rejected": -0.00616236450150609,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.216202642830543e-07,
282
- "logits/chosen": -2.0474154949188232,
283
- "logits/rejected": -2.0526936054229736,
284
- "logps/chosen": -32.521339416503906,
285
- "logps/rejected": -32.497581481933594,
286
- "loss": 0.2999,
287
- "rewards/accuracies": 0.612500011920929,
288
- "rewards/chosen": 0.021103620529174805,
289
- "rewards/margins": 0.032301973551511765,
290
- "rewards/rejected": -0.01119835115969181,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.9960716642946403e-07,
296
- "logits/chosen": -2.0485098361968994,
297
- "logits/rejected": -2.0457375049591064,
298
- "logps/chosen": -31.49017906188965,
299
- "logps/rejected": -31.345867156982422,
300
- "loss": 0.3315,
301
  "rewards/accuracies": 0.5625,
302
- "rewards/chosen": -0.001104071387089789,
303
- "rewards/margins": 0.023781923577189445,
304
- "rewards/rejected": -0.02488599717617035,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.771853789806683e-07,
310
- "logits/chosen": -1.9183101654052734,
311
- "logits/rejected": -1.9229921102523804,
312
- "logps/chosen": -31.60622215270996,
313
- "logps/rejected": -32.805049896240234,
314
- "loss": 0.3306,
315
- "rewards/accuracies": 0.5249999761581421,
316
- "rewards/chosen": 5.159676220500842e-05,
317
- "rewards/margins": 0.012757277116179466,
318
- "rewards/rejected": -0.012705676257610321,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
- "eval_logits/chosen": -2.2434937953948975,
324
- "eval_logits/rejected": -2.2386233806610107,
325
- "eval_logps/chosen": -34.02851867675781,
326
- "eval_logps/rejected": -37.51131057739258,
327
- "eval_loss": 0.3466452658176422,
328
- "eval_rewards/accuracies": 0.5020764470100403,
329
- "eval_rewards/chosen": 0.005428452976047993,
330
- "eval_rewards/margins": 0.0006523749325424433,
331
- "eval_rewards/rejected": 0.00477607874199748,
332
- "eval_runtime": 145.8217,
333
- "eval_samples_per_second": 2.352,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402e-07,
340
- "logits/chosen": -2.032118558883667,
341
- "logits/rejected": -2.042782783508301,
342
- "logps/chosen": -31.939167022705078,
343
- "logps/rejected": -33.88642501831055,
344
- "loss": 0.2871,
345
- "rewards/accuracies": 0.6000000238418579,
346
- "rewards/chosen": 0.02892182394862175,
347
- "rewards/margins": 0.050276655703783035,
348
- "rewards/rejected": -0.021354828029870987,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.318564697655179e-07,
354
- "logits/chosen": -1.925293207168579,
355
- "logits/rejected": -1.9401395320892334,
356
- "logps/chosen": -30.106586456298828,
357
- "logps/rejected": -31.56791114807129,
358
- "loss": 0.3327,
359
- "rewards/accuracies": 0.6000000238418579,
360
- "rewards/chosen": 0.0044566914439201355,
361
- "rewards/margins": 0.013218318112194538,
362
- "rewards/rejected": -0.008761629462242126,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.093227910899832e-07,
368
- "logits/chosen": -1.9831531047821045,
369
- "logits/rejected": -1.9871160984039307,
370
- "logps/chosen": -33.386024475097656,
371
- "logps/rejected": -31.5727481842041,
372
- "loss": 0.2974,
373
- "rewards/accuracies": 0.550000011920929,
374
- "rewards/chosen": 0.028298119083046913,
375
- "rewards/margins": 0.04939670115709305,
376
- "rewards/rejected": -0.021098580211400986,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279356e-07,
382
- "logits/chosen": -1.9833450317382812,
383
- "logits/rejected": -1.9613698720932007,
384
- "logps/chosen": -34.1508903503418,
385
- "logps/rejected": -34.95817947387695,
386
- "loss": 0.3388,
387
- "rewards/accuracies": 0.5249999761581421,
388
- "rewards/chosen": 0.002569653559476137,
389
- "rewards/margins": 0.00921549927443266,
390
- "rewards/rejected": -0.006645847111940384,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.654436768970182e-07,
396
- "logits/chosen": -2.0248680114746094,
397
- "logits/rejected": -2.021559238433838,
398
- "logps/chosen": -32.907005310058594,
399
- "logps/rejected": -36.22737121582031,
400
- "loss": 0.3221,
401
- "rewards/accuracies": 0.550000011920929,
402
- "rewards/chosen": 0.016254058107733727,
403
- "rewards/margins": 0.018458303064107895,
404
- "rewards/rejected": -0.002204245189204812,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.444597403062196e-07,
410
- "logits/chosen": -1.8913663625717163,
411
- "logits/rejected": -1.8889217376708984,
412
- "logps/chosen": -34.18629455566406,
413
- "logps/rejected": -35.490962982177734,
414
- "loss": 0.3601,
415
- "rewards/accuracies": 0.5,
416
- "rewards/chosen": 0.0018952053505927324,
417
- "rewards/margins": -0.010397391393780708,
418
- "rewards/rejected": 0.012292595580220222,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.2434529917578887e-07,
424
- "logits/chosen": -1.8756721019744873,
425
- "logits/rejected": -1.8731330633163452,
426
- "logps/chosen": -34.39625549316406,
427
- "logps/rejected": -31.76236343383789,
428
- "loss": 0.3492,
429
- "rewards/accuracies": 0.4625000059604645,
430
- "rewards/chosen": 0.001211274997331202,
431
- "rewards/margins": -0.006859898567199707,
432
- "rewards/rejected": 0.008071173913776875,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603521e-07,
438
- "logits/chosen": -1.9795773029327393,
439
- "logits/rejected": -1.9689537286758423,
440
- "logps/chosen": -35.320404052734375,
441
- "logps/rejected": -31.86062240600586,
442
- "loss": 0.298,
443
- "rewards/accuracies": 0.574999988079071,
444
- "rewards/chosen": 0.032611697912216187,
445
- "rewards/margins": 0.05027870461344719,
446
- "rewards/rejected": -0.017666997388005257,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071453e-08,
452
- "logits/chosen": -2.075582981109619,
453
- "logits/rejected": -2.0605711936950684,
454
- "logps/chosen": -30.907567977905273,
455
- "logps/rejected": -32.65256881713867,
456
- "loss": 0.3186,
457
- "rewards/accuracies": 0.512499988079071,
458
- "rewards/chosen": 0.014904007315635681,
459
- "rewards/margins": 0.015564213506877422,
460
- "rewards/rejected": -0.0006602049106732011,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-08,
466
- "logits/chosen": -1.9468443393707275,
467
- "logits/rejected": -1.944310188293457,
468
- "logps/chosen": -32.90850067138672,
469
- "logps/rejected": -30.801532745361328,
470
- "loss": 0.3327,
471
- "rewards/accuracies": 0.4749999940395355,
472
- "rewards/chosen": 0.016746358945965767,
473
- "rewards/margins": 0.002998964861035347,
474
- "rewards/rejected": 0.01374739408493042,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
- "eval_logits/chosen": -2.243875741958618,
480
- "eval_logits/rejected": -2.238999366760254,
481
- "eval_logps/chosen": -34.01763916015625,
482
- "eval_logps/rejected": -37.51422882080078,
483
- "eval_loss": 0.32764142751693726,
484
- "eval_rewards/accuracies": 0.5365448594093323,
485
- "eval_rewards/chosen": 0.015219501219689846,
486
- "eval_rewards/margins": 0.01306714303791523,
487
- "eval_rewards/rejected": 0.0021523565519601107,
488
- "eval_runtime": 145.9076,
489
- "eval_samples_per_second": 2.351,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589034e-08,
496
- "logits/chosen": -1.9285104274749756,
497
- "logits/rejected": -1.925238847732544,
498
- "logps/chosen": -31.57692527770996,
499
- "logps/rejected": -33.744144439697266,
500
- "loss": 0.3143,
501
- "rewards/accuracies": 0.550000011920929,
502
- "rewards/chosen": 0.023783722892403603,
503
- "rewards/margins": 0.025577496737241745,
504
- "rewards/rejected": -0.0017937703523784876,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380912e-08,
510
- "logits/chosen": -1.980386734008789,
511
- "logits/rejected": -1.9680719375610352,
512
- "logps/chosen": -34.57415771484375,
513
- "logps/rejected": -33.55961227416992,
514
- "loss": 0.31,
515
  "rewards/accuracies": 0.5375000238418579,
516
- "rewards/chosen": 0.01972355879843235,
517
- "rewards/margins": 0.03841744735836983,
518
- "rewards/rejected": -0.018693890422582626,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-08,
524
- "logits/chosen": -2.015927314758301,
525
- "logits/rejected": -2.014465570449829,
526
- "logps/chosen": -33.47549057006836,
527
- "logps/rejected": -32.47983169555664,
528
- "loss": 0.3454,
529
- "rewards/accuracies": 0.512499988079071,
530
- "rewards/chosen": 0.009270393289625645,
531
- "rewards/margins": 0.009491671808063984,
532
- "rewards/rejected": -0.0002212822437286377,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.05793773749158e-08,
538
- "logits/chosen": -2.102839946746826,
539
- "logits/rejected": -2.0870471000671387,
540
- "logps/chosen": -34.18696212768555,
541
- "logps/rejected": -33.075828552246094,
542
- "loss": 0.4092,
543
- "rewards/accuracies": 0.4124999940395355,
544
- "rewards/chosen": -0.006814646068960428,
545
- "rewards/margins": -0.04479615017771721,
546
- "rewards/rejected": 0.03798150271177292,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.251801807404168e-08,
552
- "logits/chosen": -1.9747005701065063,
553
- "logits/rejected": -1.9737575054168701,
554
- "logps/chosen": -33.231842041015625,
555
- "logps/rejected": -32.461700439453125,
556
- "loss": 0.3207,
557
- "rewards/accuracies": 0.5249999761581421,
558
- "rewards/chosen": 0.04523162543773651,
559
- "rewards/margins": 0.04212886095046997,
560
- "rewards/rejected": 0.003102770773693919,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-09,
566
- "logits/chosen": -1.9306046962738037,
567
- "logits/rejected": -1.9409637451171875,
568
- "logps/chosen": -32.217220306396484,
569
- "logps/rejected": -35.306907653808594,
570
- "loss": 0.3411,
571
- "rewards/accuracies": 0.4625000059604645,
572
- "rewards/chosen": -0.011275654658675194,
573
- "rewards/margins": 0.005062489304691553,
574
- "rewards/rejected": -0.01633814536035061,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050324e-09,
580
- "logits/chosen": -2.0694775581359863,
581
- "logits/rejected": -2.0629212856292725,
582
- "logps/chosen": -33.65714645385742,
583
- "logps/rejected": -29.221160888671875,
584
- "loss": 0.3549,
585
- "rewards/accuracies": 0.4749999940395355,
586
- "rewards/chosen": -0.004660401493310928,
587
- "rewards/margins": -0.009401795454323292,
588
- "rewards/rejected": 0.0047413939610123634,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-10,
594
- "logits/chosen": -1.9296157360076904,
595
- "logits/rejected": -1.931777000427246,
596
- "logps/chosen": -34.22825241088867,
597
- "logps/rejected": -30.912120819091797,
598
- "loss": 0.2952,
599
- "rewards/accuracies": 0.5375000238418579,
600
- "rewards/chosen": 0.009714365005493164,
601
- "rewards/margins": 0.04525749385356903,
602
- "rewards/rejected": -0.035543132573366165,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
- "train_loss": 0.3306902726749321,
610
- "train_runtime": 3256.1932,
611
- "train_samples_per_second": 0.946,
612
  "train_steps_per_second": 0.118
613
  }
614
  ],
 
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
+ "loss": 1.0,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
 
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.2820512820512818e-07,
28
+ "logits/chosen": -1.8663256168365479,
29
+ "logits/rejected": -1.8706417083740234,
30
+ "logps/chosen": -36.98673629760742,
31
+ "logps/rejected": -33.66697311401367,
32
+ "loss": 0.9994,
33
+ "rewards/accuracies": 0.5,
34
+ "rewards/chosen": 0.00019861094187945127,
35
+ "rewards/margins": 0.0005675733555108309,
36
+ "rewards/rejected": -0.0003689624136313796,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.5641025641025636e-07,
42
+ "logits/chosen": -1.997527837753296,
43
+ "logits/rejected": -2.0001606941223145,
44
+ "logps/chosen": -29.65478515625,
45
+ "logps/rejected": -29.070556640625,
46
+ "loss": 1.0001,
47
+ "rewards/accuracies": 0.44999998807907104,
48
+ "rewards/chosen": -0.00012590530968736857,
49
+ "rewards/margins": -5.477964805322699e-05,
50
+ "rewards/rejected": -7.112567982403561e-05,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.8461538461538463e-07,
56
+ "logits/chosen": -1.9195702075958252,
57
+ "logits/rejected": -1.9168812036514282,
58
+ "logps/chosen": -31.421009063720703,
59
+ "logps/rejected": -33.225616455078125,
60
+ "loss": 1.0,
61
+ "rewards/accuracies": 0.48750001192092896,
62
+ "rewards/chosen": -4.912544318358414e-05,
63
+ "rewards/margins": 2.244002462248318e-05,
64
+ "rewards/rejected": -7.15654605301097e-05,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438433e-07,
70
+ "logits/chosen": -2.0165441036224365,
71
+ "logits/rejected": -2.0078024864196777,
72
+ "logps/chosen": -32.57805252075195,
73
+ "logps/rejected": -32.504085540771484,
74
+ "loss": 1.0001,
75
+ "rewards/accuracies": 0.4375,
76
+ "rewards/chosen": -1.2443718333088327e-05,
77
+ "rewards/margins": -9.829048212850466e-05,
78
+ "rewards/rejected": 8.584677561884746e-05,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542186e-07,
84
+ "logits/chosen": -1.8641563653945923,
85
+ "logits/rejected": -1.8533931970596313,
86
+ "logps/chosen": -33.555824279785156,
87
+ "logps/rejected": -35.42582321166992,
88
+ "loss": 1.0002,
89
+ "rewards/accuracies": 0.36250001192092896,
90
+ "rewards/chosen": 1.6570575098739937e-05,
91
+ "rewards/margins": -0.0002486651937942952,
92
+ "rewards/rejected": 0.000265235808910802,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941118e-07,
98
+ "logits/chosen": -1.9452263116836548,
99
+ "logits/rejected": -1.9471622705459595,
100
+ "logps/chosen": -32.5916633605957,
101
+ "logps/rejected": -33.1885871887207,
102
+ "loss": 0.9997,
103
+ "rewards/accuracies": 0.6625000238418579,
104
+ "rewards/chosen": 8.520439587300643e-05,
105
+ "rewards/margins": 0.00025727329193614423,
106
+ "rewards/rejected": -0.0001720689469948411,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413548e-07,
112
+ "logits/chosen": -2.0798425674438477,
113
+ "logits/rejected": -2.08483624458313,
114
+ "logps/chosen": -33.98626708984375,
115
+ "logps/rejected": -36.57551956176758,
116
+ "loss": 0.9999,
117
+ "rewards/accuracies": 0.512499988079071,
118
+ "rewards/chosen": 2.217638393631205e-05,
119
+ "rewards/margins": 8.864814299158752e-05,
120
+ "rewards/rejected": -6.647173722740263e-05,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-07,
126
+ "logits/chosen": -1.942161202430725,
127
+ "logits/rejected": -1.9453051090240479,
128
+ "logps/chosen": -34.400901794433594,
129
+ "logps/rejected": -34.57304382324219,
130
+ "loss": 0.9999,
131
+ "rewards/accuracies": 0.5249999761581421,
132
+ "rewards/chosen": 0.00026908880681730807,
133
+ "rewards/margins": 7.544312393292785e-05,
134
+ "rewards/rejected": 0.000193645668332465,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.736716601303429e-07,
140
+ "logits/chosen": -1.9505256414413452,
141
+ "logits/rejected": -1.9550361633300781,
142
+ "logps/chosen": -32.452125549316406,
143
+ "logps/rejected": -32.35675811767578,
144
+ "loss": 0.9999,
145
+ "rewards/accuracies": 0.5874999761581421,
146
+ "rewards/chosen": 0.00015296679339371622,
147
+ "rewards/margins": 0.0001315164554398507,
148
+ "rewards/rejected": 2.145032885891851e-05,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.62624545834521e-07,
154
+ "logits/chosen": -2.0490074157714844,
155
+ "logits/rejected": -2.0470099449157715,
156
+ "logps/chosen": -32.25472640991211,
157
+ "logps/rejected": -31.2731990814209,
158
+ "loss": 1.0001,
159
+ "rewards/accuracies": 0.48750001192092896,
160
+ "rewards/chosen": -0.00013893461436964571,
161
+ "rewards/margins": -6.904623296577483e-05,
162
+ "rewards/rejected": -6.98883886798285e-05,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
+ "eval_logits/chosen": -2.2441036701202393,
168
+ "eval_logits/rejected": -2.239219903945923,
169
+ "eval_logps/chosen": -34.02959442138672,
170
+ "eval_logps/rejected": -37.50800704956055,
171
+ "eval_loss": 1.0000391006469727,
172
+ "eval_rewards/accuracies": 0.5099667906761169,
173
+ "eval_rewards/chosen": 4.9558610044186935e-05,
174
+ "eval_rewards/margins": -3.6566227208822966e-05,
175
+ "eval_rewards/rejected": 8.61248336150311e-05,
176
+ "eval_runtime": 145.9645,
177
+ "eval_samples_per_second": 2.35,
178
+ "eval_steps_per_second": 0.295,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.4982572012636904e-07,
184
+ "logits/chosen": -2.005286931991577,
185
+ "logits/rejected": -2.0028929710388184,
186
+ "logps/chosen": -33.22523498535156,
187
+ "logps/rejected": -34.039817810058594,
188
+ "loss": 0.9997,
189
+ "rewards/accuracies": 0.5249999761581421,
190
+ "rewards/chosen": 0.00018582009943202138,
191
+ "rewards/margins": 0.0002903031127061695,
192
+ "rewards/rejected": -0.00010448300599819049,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777677e-07,
198
+ "logits/chosen": -2.0164072513580322,
199
+ "logits/rejected": -2.008033514022827,
200
+ "logps/chosen": -32.473182678222656,
201
+ "logps/rejected": -32.18474197387695,
202
+ "loss": 1.0001,
203
+ "rewards/accuracies": 0.4625000059604645,
204
+ "rewards/chosen": -0.00028277214732952416,
205
+ "rewards/margins": -5.0430990086169913e-05,
206
+ "rewards/rejected": -0.00023234116088133305,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.194082707715275e-07,
212
+ "logits/chosen": -2.0463318824768066,
213
+ "logits/rejected": -2.03826642036438,
214
+ "logps/chosen": -30.479427337646484,
215
+ "logps/rejected": -32.030479431152344,
216
+ "loss": 1.0003,
217
+ "rewards/accuracies": 0.4000000059604645,
218
+ "rewards/chosen": -3.712148100021295e-05,
219
+ "rewards/margins": -0.00026237856945954263,
220
+ "rewards/rejected": 0.0002252570993732661,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.020402418666621e-07,
226
+ "logits/chosen": -1.9770317077636719,
227
+ "logits/rejected": -1.987318754196167,
228
+ "logps/chosen": -31.41189193725586,
229
+ "logps/rejected": -32.552860260009766,
230
+ "loss": 0.9997,
231
+ "rewards/accuracies": 0.4625000059604645,
232
+ "rewards/chosen": 7.772201206535101e-05,
233
+ "rewards/margins": 0.000257515290286392,
234
+ "rewards/rejected": -0.00017979330732487142,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.8341962650351185e-07,
240
+ "logits/chosen": -1.8904685974121094,
241
+ "logits/rejected": -1.8915725946426392,
242
+ "logps/chosen": -34.178749084472656,
243
+ "logps/rejected": -34.7522087097168,
244
+ "loss": 0.9999,
245
+ "rewards/accuracies": 0.44999998807907104,
246
+ "rewards/chosen": 0.00013941082579549402,
247
+ "rewards/margins": 8.894065831555054e-05,
248
+ "rewards/rejected": 5.047018566983752e-05,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800572e-07,
254
+ "logits/chosen": -1.9424184560775757,
255
+ "logits/rejected": -1.9389402866363525,
256
+ "logps/chosen": -36.14480209350586,
257
+ "logps/rejected": -32.753517150878906,
258
+ "loss": 0.9994,
259
+ "rewards/accuracies": 0.5249999761581421,
260
+ "rewards/chosen": 0.0002687961678020656,
261
+ "rewards/margins": 0.0005764620145782828,
262
+ "rewards/rejected": -0.0003076658467762172,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.430433172111807e-07,
268
+ "logits/chosen": -2.0420711040496826,
269
+ "logits/rejected": -2.0346856117248535,
270
+ "logps/chosen": -33.783363342285156,
271
+ "logps/rejected": -31.349674224853516,
272
+ "loss": 0.9999,
273
+ "rewards/accuracies": 0.5249999761581421,
274
+ "rewards/chosen": 0.00014495299546979368,
275
+ "rewards/margins": 0.00012318804510869086,
276
+ "rewards/rejected": 2.1765008568763733e-05,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.216202642830543e-07,
282
+ "logits/chosen": -2.0475425720214844,
283
+ "logits/rejected": -2.052799940109253,
284
+ "logps/chosen": -32.50774383544922,
285
+ "logps/rejected": -32.48485565185547,
286
+ "loss": 0.9996,
287
+ "rewards/accuracies": 0.574999988079071,
288
+ "rewards/chosen": 0.00037043425254523754,
289
+ "rewards/margins": 0.0003675749758258462,
290
+ "rewards/rejected": 2.859340838767821e-06,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.9960716642946403e-07,
296
+ "logits/chosen": -2.0484375953674316,
297
+ "logits/rejected": -2.045671224594116,
298
+ "logps/chosen": -31.50165367126465,
299
+ "logps/rejected": -31.33974266052246,
300
+ "loss": 0.9999,
301
  "rewards/accuracies": 0.5625,
302
+ "rewards/chosen": -0.00012700485240202397,
303
+ "rewards/margins": 8.82898602867499e-05,
304
+ "rewards/rejected": -0.00021529469813685864,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.771853789806683e-07,
310
+ "logits/chosen": -1.9182907342910767,
311
+ "logits/rejected": -1.9229600429534912,
312
+ "logps/chosen": -31.5924072265625,
313
+ "logps/rejected": -32.794044494628906,
314
+ "loss": 0.9998,
315
+ "rewards/accuracies": 0.512499988079071,
316
+ "rewards/chosen": 0.00013877781748306006,
317
+ "rewards/margins": 0.0001699000713415444,
318
+ "rewards/rejected": -3.112227204837836e-05,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
+ "eval_logits/chosen": -2.2440481185913086,
324
+ "eval_logits/rejected": -2.2391700744628906,
325
+ "eval_logps/chosen": -34.02214813232422,
326
+ "eval_logps/rejected": -37.503997802734375,
327
+ "eval_loss": 1.0000054836273193,
328
+ "eval_rewards/accuracies": 0.5045680999755859,
329
+ "eval_rewards/chosen": 0.00012402697757352144,
330
+ "eval_rewards/margins": -2.209126932939398e-06,
331
+ "eval_rewards/rejected": 0.00012623611837625504,
332
+ "eval_runtime": 145.6161,
333
+ "eval_samples_per_second": 2.356,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402e-07,
340
+ "logits/chosen": -2.0312139987945557,
341
+ "logits/rejected": -2.041886568069458,
342
+ "logps/chosen": -31.938335418701172,
343
+ "logps/rejected": -33.87850570678711,
344
+ "loss": 0.9995,
345
+ "rewards/accuracies": 0.574999988079071,
346
+ "rewards/chosen": 0.00032970920437946916,
347
+ "rewards/margins": 0.0004878062754869461,
348
+ "rewards/rejected": -0.00015809701289981604,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.318564697655179e-07,
354
+ "logits/chosen": -1.925606369972229,
355
+ "logits/rejected": -1.940452218055725,
356
+ "logps/chosen": -30.110549926757812,
357
+ "logps/rejected": -31.573543548583984,
358
+ "loss": 0.9998,
359
+ "rewards/accuracies": 0.5874999761581421,
360
+ "rewards/chosen": 9.872082955553196e-06,
361
+ "rewards/margins": 0.00016354399849660695,
362
+ "rewards/rejected": -0.00015367190644610673,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.093227910899832e-07,
368
+ "logits/chosen": -1.983096718788147,
369
+ "logits/rejected": -1.9870779514312744,
370
+ "logps/chosen": -33.37377166748047,
371
+ "logps/rejected": -31.558422088623047,
372
+ "loss": 0.9995,
373
+ "rewards/accuracies": 0.574999988079071,
374
+ "rewards/chosen": 0.0004369509988464415,
375
+ "rewards/margins": 0.0005281810299493372,
376
+ "rewards/rejected": -9.123003837885335e-05,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279356e-07,
382
+ "logits/chosen": -1.9832874536514282,
383
+ "logits/rejected": -1.9613001346588135,
384
+ "logps/chosen": -34.15814971923828,
385
+ "logps/rejected": -34.962947845458984,
386
+ "loss": 0.9999,
387
+ "rewards/accuracies": 0.5,
388
+ "rewards/chosen": -4.397642987896688e-05,
389
+ "rewards/margins": 7.76300803408958e-05,
390
+ "rewards/rejected": -0.00012160651385784149,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.654436768970182e-07,
396
+ "logits/chosen": -2.024568557739258,
397
+ "logits/rejected": -2.021247386932373,
398
+ "logps/chosen": -32.91596221923828,
399
+ "logps/rejected": -36.22909164428711,
400
+ "loss": 0.9999,
401
+ "rewards/accuracies": 0.48750001192092896,
402
+ "rewards/chosen": 9.10547751118429e-05,
403
+ "rewards/margins": 0.00013272995420265943,
404
+ "rewards/rejected": -4.1675208194646984e-05,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.444597403062196e-07,
410
+ "logits/chosen": -1.891326904296875,
411
+ "logits/rejected": -1.8888810873031616,
412
+ "logps/chosen": -34.165008544921875,
413
+ "logps/rejected": -35.511131286621094,
414
+ "loss": 0.9997,
415
+ "rewards/accuracies": 0.550000011920929,
416
+ "rewards/chosen": 0.00023390687420032918,
417
+ "rewards/margins": 0.00029896467458456755,
418
+ "rewards/rejected": -6.505780766019598e-05,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.2434529917578887e-07,
424
+ "logits/chosen": -1.8756563663482666,
425
+ "logits/rejected": -1.87309992313385,
426
+ "logps/chosen": -34.39522171020508,
427
+ "logps/rejected": -31.7664852142334,
428
+ "loss": 1.0,
429
+ "rewards/accuracies": 0.44999998807907104,
430
+ "rewards/chosen": 2.3789130864315666e-05,
431
+ "rewards/margins": -2.4663497242727317e-05,
432
+ "rewards/rejected": 4.8452602641191334e-05,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603521e-07,
438
+ "logits/chosen": -1.9794845581054688,
439
+ "logits/rejected": -1.9688661098480225,
440
+ "logps/chosen": -35.318504333496094,
441
+ "logps/rejected": -31.839763641357422,
442
+ "loss": 0.9996,
443
+ "rewards/accuracies": 0.512499988079071,
444
+ "rewards/chosen": 0.0003813380899373442,
445
+ "rewards/margins": 0.0003690699231810868,
446
+ "rewards/rejected": 1.2268178579688538e-05,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071453e-08,
452
+ "logits/chosen": -2.075482130050659,
453
+ "logits/rejected": -2.060464859008789,
454
+ "logps/chosen": -30.921621322631836,
455
+ "logps/rejected": -32.64299774169922,
456
+ "loss": 1.0001,
457
+ "rewards/accuracies": 0.48750001192092896,
458
+ "rewards/chosen": 2.502280767657794e-05,
459
+ "rewards/margins": -6.332028715405613e-05,
460
+ "rewards/rejected": 8.834306936478242e-05,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-08,
466
+ "logits/chosen": -1.9463533163070679,
467
+ "logits/rejected": -1.9438135623931885,
468
+ "logps/chosen": -32.896881103515625,
469
+ "logps/rejected": -30.816125869750977,
470
+ "loss": 0.9997,
471
+ "rewards/accuracies": 0.5874999761581421,
472
+ "rewards/chosen": 0.0003022877499461174,
473
+ "rewards/margins": 0.0002954433439299464,
474
+ "rewards/rejected": 6.84439146425575e-06,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
+ "eval_logits/chosen": -2.243636131286621,
480
+ "eval_logits/rejected": -2.2387540340423584,
481
+ "eval_logps/chosen": -34.02006530761719,
482
+ "eval_logps/rejected": -37.50526428222656,
483
+ "eval_loss": 0.9999700784683228,
484
+ "eval_rewards/accuracies": 0.4987541437149048,
485
+ "eval_rewards/chosen": 0.0001448561524739489,
486
+ "eval_rewards/margins": 3.130395998596214e-05,
487
+ "eval_rewards/rejected": 0.00011355219612596557,
488
+ "eval_runtime": 145.6582,
489
+ "eval_samples_per_second": 2.355,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589034e-08,
496
+ "logits/chosen": -1.9288015365600586,
497
+ "logits/rejected": -1.9255383014678955,
498
+ "logps/chosen": -31.57794189453125,
499
+ "logps/rejected": -33.73435592651367,
500
+ "loss": 0.9998,
501
+ "rewards/accuracies": 0.574999988079071,
502
+ "rewards/chosen": 0.000254074577242136,
503
+ "rewards/margins": 0.00017612635565456003,
504
+ "rewards/rejected": 7.794820703566074e-05,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380912e-08,
510
+ "logits/chosen": -1.9808727502822876,
511
+ "logits/rejected": -1.9685646295547485,
512
+ "logps/chosen": -34.54682159423828,
513
+ "logps/rejected": -33.555274963378906,
514
+ "loss": 0.9993,
515
  "rewards/accuracies": 0.5375000238418579,
516
+ "rewards/chosen": 0.0004924900131300092,
517
+ "rewards/margins": 0.0006568533135578036,
518
+ "rewards/rejected": -0.00016436330042779446,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-08,
524
+ "logits/chosen": -2.0161168575286865,
525
+ "logits/rejected": -2.0146450996398926,
526
+ "logps/chosen": -33.46837615966797,
527
+ "logps/rejected": -32.443458557128906,
528
+ "loss": 1.0002,
529
+ "rewards/accuracies": 0.48750001192092896,
530
+ "rewards/chosen": 0.00017420158837921917,
531
+ "rewards/margins": -0.00018708205607254058,
532
+ "rewards/rejected": 0.00036128362989984453,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.05793773749158e-08,
538
+ "logits/chosen": -2.102954626083374,
539
+ "logits/rejected": -2.087148666381836,
540
+ "logps/chosen": -34.159751892089844,
541
+ "logps/rejected": -33.093017578125,
542
+ "loss": 1.0001,
543
+ "rewards/accuracies": 0.512499988079071,
544
+ "rewards/chosen": 0.00019638515368569642,
545
+ "rewards/margins": -5.3783714974997565e-05,
546
+ "rewards/rejected": 0.0002501688722986728,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.251801807404168e-08,
552
+ "logits/chosen": -1.974347710609436,
553
+ "logits/rejected": -1.9733997583389282,
554
+ "logps/chosen": -33.24498748779297,
555
+ "logps/rejected": -32.44942855834961,
556
+ "loss": 0.9998,
557
+ "rewards/accuracies": 0.48750001192092896,
558
+ "rewards/chosen": 0.00037107508978806436,
559
+ "rewards/margins": 0.0002138734853360802,
560
+ "rewards/rejected": 0.00015720158990006894,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-09,
566
+ "logits/chosen": -1.9305756092071533,
567
+ "logits/rejected": -1.9409431219100952,
568
+ "logps/chosen": -32.20996856689453,
569
+ "logps/rejected": -35.26984405517578,
570
+ "loss": 1.0002,
571
+ "rewards/accuracies": 0.4749999940395355,
572
+ "rewards/chosen": -5.276153751765378e-05,
573
+ "rewards/margins": -0.00024183676578104496,
574
+ "rewards/rejected": 0.00018907521734945476,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050324e-09,
580
+ "logits/chosen": -2.0696487426757812,
581
+ "logits/rejected": -2.0630972385406494,
582
+ "logps/chosen": -33.643394470214844,
583
+ "logps/rejected": -29.242889404296875,
584
+ "loss": 0.9997,
585
+ "rewards/accuracies": 0.550000011920929,
586
+ "rewards/chosen": 8.579110726714134e-05,
587
+ "rewards/margins": 0.0002504353760741651,
588
+ "rewards/rejected": -0.00016464429791085422,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-10,
594
+ "logits/chosen": -1.9290872812271118,
595
+ "logits/rejected": -1.9312543869018555,
596
+ "logps/chosen": -34.22844696044922,
597
+ "logps/rejected": -30.902618408203125,
598
+ "loss": 0.9996,
599
+ "rewards/accuracies": 0.5874999761581421,
600
+ "rewards/chosen": 0.00010604139242786914,
601
+ "rewards/margins": 0.0004059396160300821,
602
+ "rewards/rejected": -0.00029989826725795865,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
+ "train_loss": 0.9998476387618425,
610
+ "train_runtime": 3251.6099,
611
+ "train_samples_per_second": 0.947,
612
  "train_steps_per_second": 0.118
613
  }
614
  ],