hugodk-sch committed
Commit a642673
1 Parent(s): 194dad0

Model save

Files changed (5)
  1. README.md +13 -16
  2. adapter_model.safetensors +1 -1
  3. all_results.json +3 -16
  4. train_results.json +3 -3
  5. trainer_state.json +372 -372
README.md CHANGED
@@ -1,13 +1,10 @@
 ---
 library_name: peft
 tags:
-- alignment-handbook
 - trl
 - dpo
 - generated_from_trainer
 base_model: NbAiLab/nb-gpt-j-6B-v2
-datasets:
-- hugodk-sch/aftonposten_title_prefs
 model-index:
 - name: aftonposten-6b-align-scan
   results: []
@@ -18,17 +15,17 @@ should probably proofread and complete it, then remove this comment. -->
 
 # aftonposten-6b-align-scan
 
-This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
+This model is a fine-tuned version of [NbAiLab/nb-gpt-j-6B-v2](https://huggingface.co/NbAiLab/nb-gpt-j-6B-v2) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 24.8075
-- Rewards/chosen: -0.0054
-- Rewards/rejected: -0.0092
-- Rewards/accuracies: 0.5133
-- Rewards/margins: 0.0038
-- Logps/rejected: -37.6085
-- Logps/chosen: -34.0885
-- Logits/rejected: -2.2234
-- Logits/chosen: -2.2282
+- Loss: 0.9961
+- Rewards/chosen: -0.0016
+- Rewards/rejected: -0.0055
+- Rewards/accuracies: 0.5594
+- Rewards/margins: 0.0039
+- Logps/rejected: -37.5717
+- Logps/chosen: -34.0508
+- Logits/rejected: -2.2195
+- Logits/chosen: -2.2243
 
 ## Model description
 
@@ -63,9 +60,9 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
 |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
-| 23.9156 | 0.26 | 100 | 24.9323 | -0.0015 | -0.0033 | 0.5282 | 0.0018 | -37.5501 | -34.0500 | -2.2289 | -2.2338 |
-| 22.4879 | 0.52 | 200 | 24.7992 | -0.0025 | -0.0061 | 0.5598 | 0.0037 | -37.5780 | -34.0593 | -2.2258 | -2.2307 |
-| 19.8329 | 0.78 | 300 | 24.9557 | -0.0066 | -0.0089 | 0.5216 | 0.0023 | -37.6060 | -34.1010 | -2.2229 | -2.2278 |
+| 0.9851 | 0.26 | 100 | 0.9973 | 0.0013 | -0.0014 | 0.5311 | 0.0027 | -37.5310 | -34.0218 | -2.2257 | -2.2305 |
+| 0.9596 | 0.52 | 200 | 0.9944 | 0.0028 | -0.0028 | 0.5336 | 0.0056 | -37.5450 | -34.0069 | -2.2237 | -2.2285 |
+| 0.9305 | 0.78 | 300 | 0.9961 | -0.0016 | -0.0055 | 0.5594 | 0.0039 | -37.5717 | -34.0508 | -2.2195 | -2.2243 |
 
 
 ### Framework versions
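The updated card still describes a PEFT adapter trained with TRL's DPO trainer on top of NbAiLab/nb-gpt-j-6B-v2; only the dataset reference and the metric values changed. A minimal sketch of how an adapter like this is typically loaded for inference, assuming standard `peft`/`transformers` APIs and taking the repo id `hugodk-sch/aftonposten-6b-align-scan` from the commit author and model name (the prompt is made up):

```python
# Sketch only: load the base model plus this DPO-trained PEFT adapter.
# The repo id and prompt are assumptions, not taken from this commit.
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

adapter_id = "hugodk-sch/aftonposten-6b-align-scan"  # assumed repo id
model = AutoPeftModelForCausalLM.from_pretrained(adapter_id, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained("NbAiLab/nb-gpt-j-6B-v2")

prompt = "Skriv en kort nyhetstittel om været i Oslo."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```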
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:076e169daae610676a71ed19e044c259301ea551232ff4f3bbefb889b8087358
+oid sha256:02f4916c6df444a6fa927d97ccaacdbdb852ca684cbeab3638897a126af5c4eb
 size 176183216
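Only the Git LFS pointer changed here: the pointer records a `version`, the object's sha256 `oid`, and the byte `size`, so the adapter payload has new contents but the same size. A hedged sketch for checking a locally downloaded copy against the new oid (the local path is an assumption):

```python
# Sketch: verify a downloaded adapter_model.safetensors against the sha256 oid
# recorded in the LFS pointer above.
import hashlib

expected = "02f4916c6df444a6fa927d97ccaacdbdb852ca684cbeab3638897a126af5c4eb"

h = hashlib.sha256()
with open("adapter_model.safetensors", "rb") as f:  # assumed local path
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == expected, "adapter file does not match the LFS oid"
```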
all_results.json CHANGED
@@ -1,21 +1,8 @@
 {
     "epoch": 1.0,
-    "eval_logits/chosen": -2.228182792663574,
-    "eval_logits/rejected": -2.2233598232269287,
-    "eval_logps/chosen": -34.08854293823242,
-    "eval_logps/rejected": -37.60850143432617,
-    "eval_loss": 24.80753517150879,
-    "eval_rewards/accuracies": 0.5132890343666077,
-    "eval_rewards/chosen": -0.005399257410317659,
-    "eval_rewards/margins": 0.0037888663355261087,
-    "eval_rewards/rejected": -0.009188123047351837,
-    "eval_runtime": 145.6001,
-    "eval_samples": 343,
-    "eval_samples_per_second": 2.356,
-    "eval_steps_per_second": 0.295,
-    "train_loss": 22.846377404943688,
-    "train_runtime": 3252.1936,
+    "train_loss": 0.9734619264478808,
+    "train_runtime": 3253.307,
     "train_samples": 3079,
-    "train_samples_per_second": 0.947,
+    "train_samples_per_second": 0.946,
     "train_steps_per_second": 0.118
 }
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 1.0,
-    "train_loss": 22.846377404943688,
-    "train_runtime": 3252.1936,
+    "train_loss": 0.9734619264478808,
+    "train_runtime": 3253.307,
     "train_samples": 3079,
-    "train_samples_per_second": 0.947,
+    "train_samples_per_second": 0.946,
     "train_steps_per_second": 0.118
 }
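Both `all_results.json` and `train_results.json` are plain JSON written by the Trainer, so they can be read directly; as a rough sanity check, `train_runtime * train_steps_per_second` should land near the 385 optimizer steps recorded in `trainer_state.json` below (3253.3 s × 0.118 steps/s ≈ 384). A small sketch, assuming the files sit in the current directory:

```python
# Sketch: read the Trainer's metric JSON and sanity-check the throughput numbers.
import json

with open("train_results.json") as f:  # assumed local path
    train = json.load(f)

approx_steps = train["train_runtime"] * train["train_steps_per_second"]
print(f"train_loss={train['train_loss']:.4f}, ~{approx_steps:.0f} optimizer steps")
# With the values in this commit: train_loss=0.9735, ~384 steps (385 reported).
```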
trainer_state.json CHANGED
@@ -15,7 +15,7 @@
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
- "loss": 25.0,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
@@ -25,155 +25,155 @@
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
- "logits/chosen": -1.86653733253479,
29
- "logits/rejected": -1.8708631992340088,
30
- "logps/chosen": -36.98527526855469,
31
- "logps/rejected": -33.67702865600586,
32
- "loss": 24.351,
33
- "rewards/accuracies": 0.5555555820465088,
34
- "rewards/chosen": 0.0021321007516235113,
35
- "rewards/margins": 0.006827179808169603,
36
- "rewards/rejected": -0.004695079289376736,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
- "logits/chosen": -1.9982036352157593,
43
- "logits/rejected": -2.0008480548858643,
44
- "logps/chosen": -29.6201114654541,
45
- "logps/rejected": -29.028329849243164,
46
- "loss": 25.1673,
47
- "rewards/accuracies": 0.42500001192092896,
48
- "rewards/chosen": 0.002208721823990345,
49
- "rewards/margins": -0.001302852644585073,
50
- "rewards/rejected": 0.00351157458499074,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
- "logits/chosen": -1.9204835891723633,
57
- "logits/rejected": -1.917790174484253,
58
- "logps/chosen": -31.419384002685547,
59
- "logps/rejected": -33.234046936035156,
60
- "loss": 24.926,
61
- "rewards/accuracies": 0.5249999761581421,
62
- "rewards/chosen": -0.00032898251083679497,
63
- "rewards/margins": 0.001229552668519318,
64
- "rewards/rejected": -0.0015585350338369608,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
- "logits/chosen": -2.0177228450775146,
71
- "logits/rejected": -2.008981227874756,
72
- "logps/chosen": -32.599449157714844,
73
- "logps/rejected": -32.504905700683594,
74
- "loss": 25.3506,
75
- "rewards/accuracies": 0.42500001192092896,
76
- "rewards/chosen": -0.0022641182877123356,
77
- "rewards/margins": -0.0030405251309275627,
78
- "rewards/rejected": 0.0007764073088765144,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
- "logits/chosen": -1.8622967004776,
85
- "logits/rejected": -1.8515253067016602,
86
- "logps/chosen": -33.548858642578125,
87
- "logps/rejected": -35.451934814453125,
88
- "loss": 24.9661,
89
- "rewards/accuracies": 0.512499988079071,
90
- "rewards/chosen": 0.0008619143627583981,
91
- "rewards/margins": 0.0008210102096199989,
92
- "rewards/rejected": 4.09040367230773e-05,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
- "logits/chosen": -1.941347360610962,
99
- "logits/rejected": -1.9432964324951172,
100
- "logps/chosen": -32.533042907714844,
101
- "logps/rejected": -33.22543716430664,
102
- "loss": 23.9783,
103
- "rewards/accuracies": 0.6625000238418579,
104
- "rewards/chosen": 0.006714025977998972,
105
- "rewards/margins": 0.012119540013372898,
106
- "rewards/rejected": -0.005405513569712639,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
- "logits/chosen": -2.0721096992492676,
113
- "logits/rejected": -2.077080249786377,
114
- "logps/chosen": -33.98393249511719,
115
- "logps/rejected": -36.645912170410156,
116
- "loss": 24.3657,
117
  "rewards/accuracies": 0.5625,
118
- "rewards/chosen": 0.0004553424078039825,
119
- "rewards/margins": 0.008159191347658634,
120
- "rewards/rejected": -0.00770384818315506,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
- "logits/chosen": -1.9330637454986572,
127
- "logits/rejected": -1.9362014532089233,
128
- "logps/chosen": -34.31563949584961,
129
- "logps/rejected": -34.64789581298828,
130
- "loss": 23.5462,
131
- "rewards/accuracies": 0.637499988079071,
132
- "rewards/chosen": 0.011217002756893635,
133
- "rewards/margins": 0.01676579937338829,
134
- "rewards/rejected": -0.005548796150833368,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
- "logits/chosen": -1.9414646625518799,
141
- "logits/rejected": -1.9459855556488037,
142
- "logps/chosen": -32.386444091796875,
143
- "logps/rejected": -32.36344528198242,
144
- "loss": 24.298,
145
- "rewards/accuracies": 0.637499988079071,
146
- "rewards/chosen": 0.008097763173282146,
147
- "rewards/margins": 0.008551515638828278,
148
- "rewards/rejected": -0.0004537526401691139,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
- "logits/chosen": -2.038987636566162,
155
- "logits/rejected": -2.03700590133667,
156
- "logps/chosen": -32.161869049072266,
157
- "logps/rejected": -31.31038475036621,
158
- "loss": 23.9156,
159
- "rewards/accuracies": 0.637499988079071,
160
- "rewards/chosen": 0.007896887138485909,
161
- "rewards/margins": 0.012314335443079472,
162
- "rewards/rejected": -0.004417449701577425,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
- "eval_logits/chosen": -2.2337892055511475,
168
- "eval_logits/rejected": -2.228935956954956,
169
- "eval_logps/chosen": -34.049957275390625,
170
- "eval_logps/rejected": -37.550052642822266,
171
- "eval_loss": 24.932252883911133,
172
- "eval_rewards/accuracies": 0.5282392501831055,
173
- "eval_rewards/chosen": -0.001540520810522139,
174
- "eval_rewards/margins": 0.0018024941673502326,
175
- "eval_rewards/rejected": -0.0033430152107030153,
176
- "eval_runtime": 145.9828,
177
  "eval_samples_per_second": 2.35,
178
  "eval_steps_per_second": 0.295,
179
  "step": 100
@@ -181,434 +181,434 @@
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
- "logits/chosen": -1.9933013916015625,
185
- "logits/rejected": -1.9909241199493408,
186
- "logps/chosen": -33.13324737548828,
187
- "logps/rejected": -34.06130599975586,
188
- "loss": 24.1131,
189
  "rewards/accuracies": 0.612500011920929,
190
- "rewards/chosen": 0.011056952178478241,
191
- "rewards/margins": 0.01425047405064106,
192
- "rewards/rejected": -0.003193522337824106,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
- "logits/chosen": -2.0050952434539795,
199
- "logits/rejected": -1.9967548847198486,
200
- "logps/chosen": -32.3184814453125,
201
- "logps/rejected": -32.16117477416992,
202
- "loss": 23.9805,
203
- "rewards/accuracies": 0.6625000238418579,
204
- "rewards/chosen": 0.012642547488212585,
205
- "rewards/margins": 0.012609380297362804,
206
- "rewards/rejected": 3.316625952720642e-05,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
- "logits/chosen": -2.033102512359619,
213
- "logits/rejected": -2.025144100189209,
214
- "logps/chosen": -30.341167449951172,
215
- "logps/rejected": -32.081993103027344,
216
- "loss": 23.735,
217
- "rewards/accuracies": 0.6499999761581421,
218
- "rewards/chosen": 0.013454675674438477,
219
- "rewards/margins": 0.016353728249669075,
220
- "rewards/rejected": -0.0028990507125854492,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
- "logits/chosen": -1.9635530710220337,
227
- "logits/rejected": -1.9737846851348877,
228
- "logps/chosen": -31.221776962280273,
229
- "logps/rejected": -32.5506477355957,
230
- "loss": 23.1662,
231
- "rewards/accuracies": 0.625,
232
- "rewards/chosen": 0.019788404926657677,
233
- "rewards/margins": 0.021364931017160416,
234
- "rewards/rejected": -0.0015765223652124405,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
- "logits/chosen": -1.875128984451294,
241
- "logits/rejected": -1.876308798789978,
242
- "logps/chosen": -33.92096710205078,
243
- "logps/rejected": -34.793277740478516,
244
- "loss": 22.4884,
245
- "rewards/accuracies": 0.6000000238418579,
246
- "rewards/chosen": 0.027172502130270004,
247
- "rewards/margins": 0.030774494633078575,
248
- "rewards/rejected": -0.00360199436545372,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
- "logits/chosen": -1.9267343282699585,
255
- "logits/rejected": -1.9233125448226929,
256
- "logps/chosen": -36.03553009033203,
257
- "logps/rejected": -32.72822570800781,
258
- "loss": 23.8244,
259
- "rewards/accuracies": 0.6499999761581421,
260
- "rewards/chosen": 0.013614905066788197,
261
- "rewards/margins": 0.014162260107696056,
262
- "rewards/rejected": -0.0005473563214763999,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
- "logits/chosen": -2.0270447731018066,
269
- "logits/rejected": -2.0197136402130127,
270
- "logps/chosen": -33.48883819580078,
271
- "logps/rejected": -31.42250633239746,
272
- "loss": 21.7573,
273
  "rewards/accuracies": 0.675000011920929,
274
- "rewards/chosen": 0.030902724713087082,
275
- "rewards/margins": 0.037968240678310394,
276
- "rewards/rejected": -0.007065513636916876,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
- "logits/chosen": -2.0334360599517822,
283
- "logits/rejected": -2.038677215576172,
284
- "logps/chosen": -32.235355377197266,
285
- "logps/rejected": -32.45377731323242,
286
- "loss": 22.5681,
287
- "rewards/accuracies": 0.6875,
288
- "rewards/chosen": 0.030943090096116066,
289
- "rewards/margins": 0.027806812897324562,
290
- "rewards/rejected": 0.0031362746376544237,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
- "logits/chosen": -2.034454345703125,
297
- "logits/rejected": -2.03169846534729,
298
- "logps/chosen": -31.2611141204834,
299
- "logps/rejected": -31.35662841796875,
300
- "loss": 22.7646,
301
- "rewards/accuracies": 0.675000011920929,
302
- "rewards/chosen": 0.022784242406487465,
303
- "rewards/margins": 0.026625871658325195,
304
- "rewards/rejected": -0.0038416311144828796,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
- "logits/chosen": -1.9043190479278564,
311
- "logits/rejected": -1.9089492559432983,
312
- "logps/chosen": -31.331798553466797,
313
- "logps/rejected": -32.8171501159668,
314
- "loss": 22.4879,
315
- "rewards/accuracies": 0.699999988079071,
316
- "rewards/chosen": 0.027448315173387527,
317
- "rewards/margins": 0.030070018023252487,
318
- "rewards/rejected": -0.002621703315526247,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
- "eval_logits/chosen": -2.2306509017944336,
324
- "eval_logits/rejected": -2.2258191108703613,
325
- "eval_logps/chosen": -34.05928039550781,
326
- "eval_logps/rejected": -37.57795333862305,
327
- "eval_loss": 24.79918098449707,
328
- "eval_rewards/accuracies": 0.5598006844520569,
329
- "eval_rewards/chosen": -0.0024726546835154295,
330
- "eval_rewards/margins": 0.0036605680361390114,
331
- "eval_rewards/rejected": -0.006133222486823797,
332
- "eval_runtime": 145.6285,
333
- "eval_samples_per_second": 2.355,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
- "logits/chosen": -2.0171916484832764,
341
- "logits/rejected": -2.027843952178955,
342
- "logps/chosen": -31.748519897460938,
343
- "logps/rejected": -33.94535827636719,
344
- "loss": 22.396,
345
- "rewards/accuracies": 0.7124999761581421,
346
- "rewards/chosen": 0.022278117015957832,
347
- "rewards/margins": 0.030543917790055275,
348
- "rewards/rejected": -0.008265801705420017,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
- "logits/chosen": -1.9094616174697876,
355
- "logits/rejected": -1.9241745471954346,
356
- "logps/chosen": -29.818714141845703,
357
- "logps/rejected": -31.6313533782959,
358
- "loss": 21.8415,
359
- "rewards/accuracies": 0.737500011920929,
360
- "rewards/chosen": 0.029282480478286743,
361
- "rewards/margins": 0.0366000160574913,
362
- "rewards/rejected": -0.007317539304494858,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
- "logits/chosen": -1.9662542343139648,
369
- "logits/rejected": -1.9702177047729492,
370
- "logps/chosen": -33.06914138793945,
371
- "logps/rejected": -31.627248764038086,
372
- "loss": 21.4931,
373
- "rewards/accuracies": 0.7250000238418579,
374
- "rewards/chosen": 0.03483276441693306,
375
- "rewards/margins": 0.042627494782209396,
376
- "rewards/rejected": -0.007794731762260199,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
- "logits/chosen": -1.964835524559021,
383
- "logits/rejected": -1.9430347681045532,
384
- "logps/chosen": -33.808719635009766,
385
- "logps/rejected": -35.10743713378906,
386
- "loss": 20.8354,
387
- "rewards/accuracies": 0.7124999761581421,
388
- "rewards/chosen": 0.034502916038036346,
389
- "rewards/margins": 0.05016731098294258,
390
- "rewards/rejected": -0.015664398670196533,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
- "logits/chosen": -2.0063602924346924,
397
- "logits/rejected": -2.0030438899993896,
398
- "logps/chosen": -32.721397399902344,
399
- "logps/rejected": -36.288963317871094,
400
- "loss": 22.7702,
401
- "rewards/accuracies": 0.612500011920929,
402
- "rewards/chosen": 0.020366890355944633,
403
- "rewards/margins": 0.02677120640873909,
404
- "rewards/rejected": -0.006404316984117031,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
- "logits/chosen": -1.8736040592193604,
411
- "logits/rejected": -1.8712146282196045,
412
- "logps/chosen": -33.984458923339844,
413
- "logps/rejected": -35.55829620361328,
414
- "loss": 22.8113,
415
- "rewards/accuracies": 0.737500011920929,
416
- "rewards/chosen": 0.020393703132867813,
417
- "rewards/margins": 0.02576116845011711,
418
- "rewards/rejected": -0.0053674690425395966,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
- "logits/chosen": -1.8587570190429688,
425
- "logits/rejected": -1.8563568592071533,
426
- "logps/chosen": -34.19550704956055,
427
- "logps/rejected": -31.841039657592773,
428
- "loss": 22.7599,
429
- "rewards/accuracies": 0.6875,
430
- "rewards/chosen": 0.020208846777677536,
431
- "rewards/margins": 0.027179840952157974,
432
- "rewards/rejected": -0.00697099044919014,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
- "logits/chosen": -1.9618046283721924,
439
- "logits/rejected": -1.9513158798217773,
440
- "logps/chosen": -34.9988899230957,
441
- "logps/rejected": -31.8876895904541,
442
- "loss": 21.4726,
443
- "rewards/accuracies": 0.699999988079071,
444
- "rewards/chosen": 0.03577522560954094,
445
- "rewards/margins": 0.040445104241371155,
446
- "rewards/rejected": -0.0046698772348463535,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
- "logits/chosen": -2.057340621948242,
453
- "logits/rejected": -2.042445659637451,
454
- "logps/chosen": -30.718120574951172,
455
- "logps/rejected": -32.65331268310547,
456
- "loss": 23.4433,
457
  "rewards/accuracies": 0.612500011920929,
458
- "rewards/chosen": 0.020600156858563423,
459
- "rewards/margins": 0.020748335868120193,
460
- "rewards/rejected": -0.00014818087220191956,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
- "logits/chosen": -1.928430199623108,
467
- "logits/rejected": -1.9259220361709595,
468
- "logps/chosen": -32.38062286376953,
469
- "logps/rejected": -30.925670623779297,
470
- "loss": 19.8329,
471
- "rewards/accuracies": 0.800000011920929,
472
- "rewards/chosen": 0.05464861914515495,
473
- "rewards/margins": 0.06553500890731812,
474
- "rewards/rejected": -0.010886380448937416,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
- "eval_logits/chosen": -2.227757215499878,
480
- "eval_logits/rejected": -2.222930908203125,
481
- "eval_logps/chosen": -34.10100555419922,
482
- "eval_logps/rejected": -37.605979919433594,
483
- "eval_loss": 24.95565414428711,
484
- "eval_rewards/accuracies": 0.5215947031974792,
485
- "eval_rewards/chosen": -0.006645896937698126,
486
- "eval_rewards/margins": 0.0022900840267539024,
487
- "eval_rewards/rejected": -0.008935980498790741,
488
- "eval_runtime": 145.783,
489
- "eval_samples_per_second": 2.353,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
- "logits/chosen": -1.9124844074249268,
497
- "logits/rejected": -1.9092193841934204,
498
- "logps/chosen": -31.315048217773438,
499
- "logps/rejected": -33.801414489746094,
500
- "loss": 22.1909,
501
  "rewards/accuracies": 0.7250000238418579,
502
- "rewards/chosen": 0.028830338269472122,
503
- "rewards/margins": 0.034757114946842194,
504
- "rewards/rejected": -0.005926776677370071,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
- "logits/chosen": -1.963141679763794,
511
- "logits/rejected": -1.9509350061416626,
512
- "logps/chosen": -34.32793426513672,
513
- "logps/rejected": -33.663185119628906,
514
- "loss": 21.758,
515
- "rewards/accuracies": 0.6625000238418579,
516
- "rewards/chosen": 0.026814326643943787,
517
- "rewards/margins": 0.03924902528524399,
518
- "rewards/rejected": -0.012434699572622776,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
- "logits/chosen": -1.9982473850250244,
525
- "logits/rejected": -1.9968185424804688,
526
- "logps/chosen": -33.15186309814453,
527
- "logps/rejected": -32.56055450439453,
528
- "loss": 21.5888,
529
- "rewards/accuracies": 0.7250000238418579,
530
- "rewards/chosen": 0.03339282050728798,
531
- "rewards/margins": 0.0414896085858345,
532
- "rewards/rejected": -0.008096789941191673,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
- "logits/chosen": -2.084803819656372,
539
- "logits/rejected": -2.069124698638916,
540
- "logps/chosen": -33.807334899902344,
541
- "logps/rejected": -33.127708435058594,
542
- "loss": 21.7884,
543
- "rewards/accuracies": 0.7124999761581421,
544
- "rewards/chosen": 0.03720540553331375,
545
- "rewards/margins": 0.0381726399064064,
546
- "rewards/rejected": -0.0009672341984696686,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
- "logits/chosen": -1.9572391510009766,
553
- "logits/rejected": -1.9564039707183838,
554
- "logps/chosen": -32.83594512939453,
555
- "logps/rejected": -32.575740814208984,
556
- "loss": 20.605,
557
- "rewards/accuracies": 0.7250000238418579,
558
- "rewards/chosen": 0.04461529105901718,
559
- "rewards/margins": 0.05567503720521927,
560
- "rewards/rejected": -0.011059742420911789,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
- "logits/chosen": -1.9132989645004272,
567
- "logits/rejected": -1.923563003540039,
568
- "logps/chosen": -31.88213539123535,
569
- "logps/rejected": -35.31838607788086,
570
- "loss": 22.111,
571
  "rewards/accuracies": 0.6625000238418579,
572
- "rewards/chosen": 0.03225552663207054,
573
- "rewards/margins": 0.03521796688437462,
574
- "rewards/rejected": -0.0029624411836266518,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
- "logits/chosen": -2.0528197288513184,
581
- "logits/rejected": -2.046360492706299,
582
- "logps/chosen": -33.33294677734375,
583
- "logps/rejected": -29.250751495361328,
584
- "loss": 22.0205,
585
- "rewards/accuracies": 0.6875,
586
- "rewards/chosen": 0.03190212696790695,
587
- "rewards/margins": 0.03433450311422348,
588
- "rewards/rejected": -0.0024323747493326664,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
- "logits/chosen": -1.9122035503387451,
595
- "logits/rejected": -1.9144351482391357,
596
- "logps/chosen": -33.86780548095703,
597
- "logps/rejected": -30.96420669555664,
598
- "loss": 21.1141,
599
- "rewards/accuracies": 0.800000011920929,
600
- "rewards/chosen": 0.03712456300854683,
601
- "rewards/margins": 0.046282440423965454,
602
- "rewards/rejected": -0.009157879278063774,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
- "train_loss": 22.846377404943688,
610
- "train_runtime": 3252.1936,
611
- "train_samples_per_second": 0.947,
612
  "train_steps_per_second": 0.118
613
  }
614
  ],
 
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
+ "loss": 1.0,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
 
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
+ "logits/chosen": -1.8667426109313965,
29
+ "logits/rejected": -1.8710602521896362,
30
+ "logps/chosen": -36.991912841796875,
31
+ "logps/rejected": -33.67206954956055,
32
+ "loss": 0.9943,
33
+ "rewards/accuracies": 0.5694444179534912,
34
+ "rewards/chosen": 0.0014687532093375921,
35
+ "rewards/margins": 0.005667862948030233,
36
+ "rewards/rejected": -0.004199109505861998,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
+ "logits/chosen": -1.9977442026138306,
43
+ "logits/rejected": -2.0003952980041504,
44
+ "logps/chosen": -29.659366607666016,
45
+ "logps/rejected": -29.05437660217285,
46
+ "loss": 1.0026,
47
+ "rewards/accuracies": 0.4124999940395355,
48
+ "rewards/chosen": -0.0017170545179396868,
49
+ "rewards/margins": -0.002623475855216384,
50
+ "rewards/rejected": 0.0009064216283150017,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
+ "logits/chosen": -1.920693039894104,
57
+ "logits/rejected": -1.91802179813385,
58
+ "logps/chosen": -31.39971351623535,
59
+ "logps/rejected": -33.21495819091797,
60
+ "loss": 0.9987,
61
+ "rewards/accuracies": 0.5625,
62
+ "rewards/chosen": 0.001637960784137249,
63
+ "rewards/margins": 0.0012880933936685324,
64
+ "rewards/rejected": 0.000349867797922343,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
+ "logits/chosen": -2.018057107925415,
71
+ "logits/rejected": -2.0093047618865967,
72
+ "logps/chosen": -32.565284729003906,
73
+ "logps/rejected": -32.50053405761719,
74
+ "loss": 1.0001,
75
+ "rewards/accuracies": 0.5375000238418579,
76
+ "rewards/chosen": 0.0011519074905663729,
77
+ "rewards/margins": -6.170915730763227e-05,
78
+ "rewards/rejected": 0.0012136166915297508,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
+ "logits/chosen": -1.8627817630767822,
85
+ "logits/rejected": -1.851999044418335,
86
+ "logps/chosen": -33.549964904785156,
87
+ "logps/rejected": -35.44340896606445,
88
+ "loss": 1.0001,
89
+ "rewards/accuracies": 0.4749999940395355,
90
+ "rewards/chosen": 0.0007515085162594914,
91
+ "rewards/margins": -0.00014221524179447442,
92
+ "rewards/rejected": 0.000893724150955677,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
+ "logits/chosen": -1.9415528774261475,
99
+ "logits/rejected": -1.9434846639633179,
100
+ "logps/chosen": -32.53631591796875,
101
+ "logps/rejected": -33.215850830078125,
102
+ "loss": 0.9892,
103
+ "rewards/accuracies": 0.5625,
104
+ "rewards/chosen": 0.006386542227119207,
105
+ "rewards/margins": 0.010833840817213058,
106
+ "rewards/rejected": -0.004447298124432564,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
+ "logits/chosen": -2.072315216064453,
113
+ "logits/rejected": -2.07728910446167,
114
+ "logps/chosen": -33.98168182373047,
115
+ "logps/rejected": -36.64748001098633,
116
+ "loss": 0.9915,
117
  "rewards/accuracies": 0.5625,
118
+ "rewards/chosen": 0.0006806664168834686,
119
+ "rewards/margins": 0.00854154396802187,
120
+ "rewards/rejected": -0.007860877551138401,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
+ "logits/chosen": -1.932929277420044,
127
+ "logits/rejected": -1.9360615015029907,
128
+ "logps/chosen": -34.32741928100586,
129
+ "logps/rejected": -34.65910720825195,
130
+ "loss": 0.9833,
131
+ "rewards/accuracies": 0.625,
132
+ "rewards/chosen": 0.010038824751973152,
133
+ "rewards/margins": 0.01670856587588787,
134
+ "rewards/rejected": -0.006669741123914719,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
+ "logits/chosen": -1.9398362636566162,
141
+ "logits/rejected": -1.9443330764770508,
142
+ "logps/chosen": -32.37015914916992,
143
+ "logps/rejected": -32.354644775390625,
144
+ "loss": 0.9907,
145
+ "rewards/accuracies": 0.6625000238418579,
146
+ "rewards/chosen": 0.009726046584546566,
147
+ "rewards/margins": 0.00929970107972622,
148
+ "rewards/rejected": 0.0004263453301973641,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
+ "logits/chosen": -2.036825656890869,
155
+ "logits/rejected": -2.0348496437072754,
156
+ "logps/chosen": -32.12454605102539,
157
+ "logps/rejected": -31.299402236938477,
158
+ "loss": 0.9851,
159
+ "rewards/accuracies": 0.625,
160
+ "rewards/chosen": 0.011629154905676842,
161
+ "rewards/margins": 0.014948204159736633,
162
+ "rewards/rejected": -0.003319049719721079,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
+ "eval_logits/chosen": -2.230534791946411,
168
+ "eval_logits/rejected": -2.225694417953491,
169
+ "eval_logps/chosen": -34.02181625366211,
170
+ "eval_logps/rejected": -37.53096389770508,
171
+ "eval_loss": 0.9973288774490356,
172
+ "eval_rewards/accuracies": 0.531146228313446,
173
+ "eval_rewards/chosen": 0.0012735594063997269,
174
+ "eval_rewards/margins": 0.002707866718992591,
175
+ "eval_rewards/rejected": -0.0014343069633468986,
176
+ "eval_runtime": 145.9767,
177
  "eval_samples_per_second": 2.35,
178
  "eval_steps_per_second": 0.295,
179
  "step": 100
 
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
+ "logits/chosen": -1.991271734237671,
185
+ "logits/rejected": -1.9888839721679688,
186
+ "logps/chosen": -33.108524322509766,
187
+ "logps/rejected": -33.99832534790039,
188
+ "loss": 0.9896,
189
  "rewards/accuracies": 0.612500011920929,
190
+ "rewards/chosen": 0.013529380783438683,
191
+ "rewards/margins": 0.01042473316192627,
192
+ "rewards/rejected": 0.0031046485528349876,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
+ "logits/chosen": -2.0025076866149902,
199
+ "logits/rejected": -1.9941962957382202,
200
+ "logps/chosen": -32.313053131103516,
201
+ "logps/rejected": -32.1397705078125,
202
+ "loss": 0.989,
203
+ "rewards/accuracies": 0.6000000238418579,
204
+ "rewards/chosen": 0.013185250572860241,
205
+ "rewards/margins": 0.011011496186256409,
206
+ "rewards/rejected": 0.002173755317926407,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
+ "logits/chosen": -2.030679941177368,
213
+ "logits/rejected": -2.022730827331543,
214
+ "logps/chosen": -30.313289642333984,
215
+ "logps/rejected": -32.024986267089844,
216
+ "loss": 0.9866,
217
+ "rewards/accuracies": 0.5249999761581421,
218
+ "rewards/chosen": 0.016243018209934235,
219
+ "rewards/margins": 0.013441057875752449,
220
+ "rewards/rejected": 0.00280195870436728,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
+ "logits/chosen": -1.9600480794906616,
227
+ "logits/rejected": -1.9702802896499634,
228
+ "logps/chosen": -31.212570190429688,
229
+ "logps/rejected": -32.54707717895508,
230
+ "loss": 0.9781,
231
+ "rewards/accuracies": 0.637499988079071,
232
+ "rewards/chosen": 0.020709021016955376,
233
+ "rewards/margins": 0.02192843146622181,
234
+ "rewards/rejected": -0.0012194132432341576,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
+ "logits/chosen": -1.8716413974761963,
241
+ "logits/rejected": -1.8728001117706299,
242
+ "logps/chosen": -33.884464263916016,
243
+ "logps/rejected": -34.786155700683594,
244
+ "loss": 0.9663,
245
+ "rewards/accuracies": 0.625,
246
+ "rewards/chosen": 0.030822690576314926,
247
+ "rewards/margins": 0.03371291980147362,
248
+ "rewards/rejected": -0.0028902278281748295,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
+ "logits/chosen": -1.923204779624939,
255
+ "logits/rejected": -1.9197826385498047,
256
+ "logps/chosen": -35.9670295715332,
257
+ "logps/rejected": -32.66301727294922,
258
+ "loss": 0.9855,
259
+ "rewards/accuracies": 0.625,
260
+ "rewards/chosen": 0.02046511135995388,
261
+ "rewards/margins": 0.014491458423435688,
262
+ "rewards/rejected": 0.00597365340217948,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
+ "logits/chosen": -2.0242836475372314,
269
+ "logits/rejected": -2.0169827938079834,
270
+ "logps/chosen": -33.456817626953125,
271
+ "logps/rejected": -31.400043487548828,
272
+ "loss": 0.9611,
273
  "rewards/accuracies": 0.675000011920929,
274
+ "rewards/chosen": 0.03410445898771286,
275
+ "rewards/margins": 0.038923610001802444,
276
+ "rewards/rejected": -0.004819151014089584,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
+ "logits/chosen": -2.0301671028137207,
283
+ "logits/rejected": -2.0354058742523193,
284
+ "logps/chosen": -32.161399841308594,
285
+ "logps/rejected": -32.41175842285156,
286
+ "loss": 0.969,
287
+ "rewards/accuracies": 0.7124999761581421,
288
+ "rewards/chosen": 0.03833850100636482,
289
+ "rewards/margins": 0.031000157818198204,
290
+ "rewards/rejected": 0.007338343653827906,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
+ "logits/chosen": -2.0318570137023926,
297
+ "logits/rejected": -2.0290966033935547,
298
+ "logps/chosen": -31.263225555419922,
299
+ "logps/rejected": -31.29646873474121,
300
+ "loss": 0.9796,
301
+ "rewards/accuracies": 0.6000000238418579,
302
+ "rewards/chosen": 0.022572491317987442,
303
+ "rewards/margins": 0.0203980952501297,
304
+ "rewards/rejected": 0.0021743960678577423,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
+ "logits/chosen": -1.9017003774642944,
311
+ "logits/rejected": -1.906355619430542,
312
+ "logps/chosen": -31.227977752685547,
313
+ "logps/rejected": -32.81672286987305,
314
+ "loss": 0.9596,
315
+ "rewards/accuracies": 0.75,
316
+ "rewards/chosen": 0.03783063963055611,
317
+ "rewards/margins": 0.0404098778963089,
318
+ "rewards/rejected": -0.002579244552180171,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
+ "eval_logits/chosen": -2.2284843921661377,
324
+ "eval_logits/rejected": -2.2236602306365967,
325
+ "eval_logps/chosen": -34.00685501098633,
326
+ "eval_logps/rejected": -37.54502868652344,
327
+ "eval_loss": 0.9944055080413818,
328
+ "eval_rewards/accuracies": 0.5336378812789917,
329
+ "eval_rewards/chosen": 0.0027695323806256056,
330
+ "eval_rewards/margins": 0.005610140040516853,
331
+ "eval_rewards/rejected": -0.002840606961399317,
332
+ "eval_runtime": 145.7295,
333
+ "eval_samples_per_second": 2.354,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
+ "logits/chosen": -2.0140950679779053,
341
+ "logits/rejected": -2.024714946746826,
342
+ "logps/chosen": -31.731609344482422,
343
+ "logps/rejected": -33.924285888671875,
344
+ "loss": 0.9699,
345
+ "rewards/accuracies": 0.625,
346
+ "rewards/chosen": 0.02396966516971588,
347
+ "rewards/margins": 0.030128711834549904,
348
+ "rewards/rejected": -0.006159046199172735,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
+ "logits/chosen": -1.9061514139175415,
355
+ "logits/rejected": -1.9208866357803345,
356
+ "logps/chosen": -29.77530288696289,
357
+ "logps/rejected": -31.6021728515625,
358
+ "loss": 0.962,
359
+ "rewards/accuracies": 0.699999988079071,
360
+ "rewards/chosen": 0.03362376615405083,
361
+ "rewards/margins": 0.038023434579372406,
362
+ "rewards/rejected": -0.004399660509079695,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
+ "logits/chosen": -1.9625619649887085,
369
+ "logits/rejected": -1.9665281772613525,
370
+ "logps/chosen": -33.06541061401367,
371
+ "logps/rejected": -31.63521385192871,
372
+ "loss": 0.9562,
373
+ "rewards/accuracies": 0.6625000238418579,
374
+ "rewards/chosen": 0.0352059006690979,
375
+ "rewards/margins": 0.04379696026444435,
376
+ "rewards/rejected": -0.008591057732701302,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
+ "logits/chosen": -1.960546851158142,
383
+ "logits/rejected": -1.938780426979065,
384
+ "logps/chosen": -33.803611755371094,
385
+ "logps/rejected": -35.09512710571289,
386
+ "loss": 0.9506,
387
+ "rewards/accuracies": 0.675000011920929,
388
+ "rewards/chosen": 0.03501402214169502,
389
+ "rewards/margins": 0.049447186291217804,
390
+ "rewards/rejected": -0.01443316787481308,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
+ "logits/chosen": -2.002559185028076,
397
+ "logits/rejected": -1.9992578029632568,
398
+ "logps/chosen": -32.70106506347656,
399
+ "logps/rejected": -36.2247428894043,
400
+ "loss": 0.9776,
401
+ "rewards/accuracies": 0.6499999761581421,
402
+ "rewards/chosen": 0.0224003903567791,
403
+ "rewards/margins": 0.022382449358701706,
404
+ "rewards/rejected": 1.793876253941562e-05,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
+ "logits/chosen": -1.8695703744888306,
411
+ "logits/rejected": -1.867144227027893,
412
+ "logps/chosen": -33.96361541748047,
413
+ "logps/rejected": -35.52811050415039,
414
+ "loss": 0.9752,
415
+ "rewards/accuracies": 0.6625000238418579,
416
+ "rewards/chosen": 0.022478096187114716,
417
+ "rewards/margins": 0.024826791137456894,
418
+ "rewards/rejected": -0.0023486947175115347,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
+ "logits/chosen": -1.8547470569610596,
425
+ "logits/rejected": -1.8523423671722412,
426
+ "logps/chosen": -34.13903045654297,
427
+ "logps/rejected": -31.83770179748535,
428
+ "loss": 0.9675,
429
+ "rewards/accuracies": 0.6625000238418579,
430
+ "rewards/chosen": 0.02585705555975437,
431
+ "rewards/margins": 0.03249421715736389,
432
+ "rewards/rejected": -0.006637162063270807,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
+ "logits/chosen": -1.9572479724884033,
439
+ "logits/rejected": -1.9467785358428955,
440
+ "logps/chosen": -34.97367858886719,
441
+ "logps/rejected": -31.858196258544922,
442
+ "loss": 0.96,
443
+ "rewards/accuracies": 0.7124999761581421,
444
+ "rewards/chosen": 0.03829622268676758,
445
+ "rewards/margins": 0.04001673310995102,
446
+ "rewards/rejected": -0.001720509259030223,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
+ "logits/chosen": -2.0524978637695312,
453
+ "logits/rejected": -2.0376133918762207,
454
+ "logps/chosen": -30.694583892822266,
455
+ "logps/rejected": -32.60517501831055,
456
+ "loss": 0.9817,
457
  "rewards/accuracies": 0.612500011920929,
458
+ "rewards/chosen": 0.022954344749450684,
459
+ "rewards/margins": 0.01828841306269169,
460
+ "rewards/rejected": 0.004665931686758995,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
+ "logits/chosen": -1.923266053199768,
467
+ "logits/rejected": -1.9207680225372314,
468
+ "logps/chosen": -32.310142517089844,
469
+ "logps/rejected": -30.895198822021484,
470
+ "loss": 0.9305,
471
+ "rewards/accuracies": 0.762499988079071,
472
+ "rewards/chosen": 0.061696797609329224,
473
+ "rewards/margins": 0.06953591853380203,
474
+ "rewards/rejected": -0.007839125581085682,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
+ "eval_logits/chosen": -2.224316358566284,
480
+ "eval_logits/rejected": -2.2194955348968506,
481
+ "eval_logps/chosen": -34.0507698059082,
482
+ "eval_logps/rejected": -37.57167053222656,
483
+ "eval_loss": 0.9961203336715698,
484
+ "eval_rewards/accuracies": 0.559385359287262,
485
+ "eval_rewards/chosen": -0.001622139709070325,
486
+ "eval_rewards/margins": 0.0038830821868032217,
487
+ "eval_rewards/rejected": -0.005505221430212259,
488
+ "eval_runtime": 145.6991,
489
+ "eval_samples_per_second": 2.354,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
+ "logits/chosen": -1.9086971282958984,
497
+ "logits/rejected": -1.9054778814315796,
498
+ "logps/chosen": -31.2874755859375,
499
+ "logps/rejected": -33.80743408203125,
500
+ "loss": 0.9619,
501
  "rewards/accuracies": 0.7250000238418579,
502
+ "rewards/chosen": 0.03158777207136154,
503
+ "rewards/margins": 0.03811599686741829,
504
+ "rewards/rejected": -0.006528225727379322,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
+ "logits/chosen": -1.958402395248413,
511
+ "logits/rejected": -1.946244478225708,
512
+ "logps/chosen": -34.26841735839844,
513
+ "logps/rejected": -33.66584014892578,
514
+ "loss": 0.9545,
515
+ "rewards/accuracies": 0.699999988079071,
516
+ "rewards/chosen": 0.03276572749018669,
517
+ "rewards/margins": 0.04546588286757469,
518
+ "rewards/rejected": -0.012700155377388,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
+ "logits/chosen": -1.9934791326522827,
525
+ "logits/rejected": -1.9920657873153687,
526
+ "logps/chosen": -33.07835006713867,
527
+ "logps/rejected": -32.51830291748047,
528
+ "loss": 0.9554,
529
+ "rewards/accuracies": 0.75,
530
+ "rewards/chosen": 0.040744177997112274,
531
+ "rewards/margins": 0.0446164533495903,
532
+ "rewards/rejected": -0.0038722690660506487,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
+ "logits/chosen": -2.0803451538085938,
539
+ "logits/rejected": -2.064704418182373,
540
+ "logps/chosen": -33.757957458496094,
541
+ "logps/rejected": -33.0684700012207,
542
+ "loss": 0.9628,
543
+ "rewards/accuracies": 0.675000011920929,
544
+ "rewards/chosen": 0.0421435609459877,
545
+ "rewards/margins": 0.037186361849308014,
546
+ "rewards/rejected": 0.004957201890647411,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
+ "logits/chosen": -1.9529346227645874,
553
+ "logits/rejected": -1.9521318674087524,
554
+ "logps/chosen": -32.80330276489258,
555
+ "logps/rejected": -32.498817443847656,
556
+ "loss": 0.9488,
557
+ "rewards/accuracies": 0.6875,
558
+ "rewards/chosen": 0.04787950962781906,
559
+ "rewards/margins": 0.051246631890535355,
560
+ "rewards/rejected": -0.0033671148121356964,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
+ "logits/chosen": -1.908622145652771,
567
+ "logits/rejected": -1.9189058542251587,
568
+ "logps/chosen": -31.85636329650879,
569
+ "logps/rejected": -35.29613494873047,
570
+ "loss": 0.9644,
571
  "rewards/accuracies": 0.6625000238418579,
572
+ "rewards/chosen": 0.03483304753899574,
573
+ "rewards/margins": 0.03557039424777031,
574
+ "rewards/rejected": -0.0007373450207524002,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
+ "logits/chosen": -2.048346757888794,
581
+ "logits/rejected": -2.041914939880371,
582
+ "logps/chosen": -33.28711700439453,
583
+ "logps/rejected": -29.241107940673828,
584
+ "loss": 0.962,
585
+ "rewards/accuracies": 0.7124999761581421,
586
+ "rewards/chosen": 0.03648493438959122,
587
+ "rewards/margins": 0.03795299679040909,
588
+ "rewards/rejected": -0.001468065194785595,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
+ "logits/chosen": -1.9080861806869507,
595
+ "logits/rejected": -1.910278558731079,
596
+ "logps/chosen": -33.861846923828125,
597
+ "logps/rejected": -30.95591163635254,
598
+ "loss": 0.954,
599
+ "rewards/accuracies": 0.6625000238418579,
600
+ "rewards/chosen": 0.03772079199552536,
601
+ "rewards/margins": 0.04604965075850487,
602
+ "rewards/rejected": -0.00832886528223753,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
+ "train_loss": 0.9734619264478808,
610
+ "train_runtime": 3253.307,
611
+ "train_samples_per_second": 0.946,
612
  "train_steps_per_second": 0.118
613
  }
614
  ],
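Each logged entry in this file pairs the DPO reward statistics with the loss: `rewards/margins` equals `rewards/chosen - rewards/rejected`, and the numbers in this log are consistent with a hinge-style preference loss of roughly `max(0, 1 - margin)` (for example, step 100: 1 - 0.0149 ≈ 0.9851). That is an observation from the values above, not a statement about the training configuration. A hedged sketch that pulls the training curve out of `trainer_state.json`, assuming the standard Hugging Face Trainer layout with a `log_history` list:

```python
# Sketch: extract step-wise training loss and reward margin from trainer_state.json.
import json

with open("trainer_state.json") as f:  # assumed local path
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:  # training logs; evaluation entries use "eval_loss" instead
        margin = entry.get("rewards/margins", float("nan"))
        print(f"step {entry['step']:>3}: loss={entry['loss']:.4f}, margin={margin:.4f}")
```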