hugodk-sch committed
Commit 328926c
1 parent: b87d259

Model save

Files changed (5)
  1. README.md +15 -18
  2. adapter_model.safetensors +1 -1
  3. all_results.json +4 -17
  4. train_results.json +4 -4
  5. trainer_state.json +419 -419
README.md CHANGED
@@ -1,13 +1,10 @@
  ---
  library_name: peft
  tags:
- - alignment-handbook
  - trl
  - dpo
  - generated_from_trainer
  base_model: NbAiLab/nb-gpt-j-6B-v2
- datasets:
- - hugodk-sch/aftonposten_title_prefs
  model-index:
  - name: aftonposten-6b-align-scan
    results: []
@@ -18,17 +15,17 @@ should probably proofread and complete it, then remove this comment. -->
 
  # aftonposten-6b-align-scan
 
- This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
+ This model is a fine-tuned version of [NbAiLab/nb-gpt-j-6B-v2](https://huggingface.co/NbAiLab/nb-gpt-j-6B-v2) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.9991
- - Rewards/chosen: 0.0161
- - Rewards/rejected: 0.0150
- - Rewards/accuracies: 0.5365
- - Rewards/margins: 0.0010
- - Logps/rejected: -37.4999
- - Logps/chosen: -34.0167
- - Logits/rejected: -2.2391
- - Logits/chosen: -2.2439
+ - Loss: 0.6929
+ - Rewards/chosen: -0.0003
+ - Rewards/rejected: -0.0007
+ - Rewards/accuracies: 0.5419
+ - Rewards/margins: 0.0004
+ - Logps/rejected: -37.5868
+ - Logps/chosen: -34.0648
+ - Logits/rejected: -2.2163
+ - Logits/chosen: -2.2211
 
  ## Model description
 
@@ -61,11 +58,11 @@ The following hyperparameters were used during training:
 
  ### Training results
 
- | Training Loss | Epoch | Step | Logits/chosen | Logits/rejected | Logps/chosen | Logps/rejected | Validation Loss | Rewards/accuracies | Rewards/chosen | Rewards/margins | Rewards/rejected |
- |:-------------:|:-----:|:----:|:-------------:|:---------------:|:------------:|:--------------:|:---------------:|:------------------:|:--------------:|:---------------:|:----------------:|
- | 0.9823 | 0.26 | 100 | -2.2440 | -2.2391 | -34.0216 | -37.5058 | 0.9982 | 0.5104 | 0.0116 | 0.0019 | 0.0097 |
- | 0.9696 | 0.52 | 200 | -2.2439 | -2.2390 | -34.0217 | -37.5024 | 1.0014 | 0.4693 | 0.0116 | -0.0012 | 0.0128 |
- | 0.9581 | 0.78 | 300 | -2.2437 | -2.2388 | -34.0306 | -37.4994 | 1.0120 | 0.4726 | 0.0035 | -0.0120 | 0.0155 |
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 0.6924 | 0.26 | 100 | 0.6931 | -0.0001 | -0.0002 | 0.5187 | 0.0001 | -37.5393 | -34.0468 | -2.2252 | -2.2301 |
+ | 0.6914 | 0.52 | 200 | 0.6930 | -0.0001 | -0.0004 | 0.5245 | 0.0003 | -37.5530 | -34.0426 | -2.2201 | -2.2250 |
+ | 0.6895 | 0.78 | 300 | 0.6929 | -0.0003 | -0.0007 | 0.5419 | 0.0004 | -37.5868 | -34.0648 | -2.2163 | -2.2211 |
 
 
  ### Framework versions
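
The updated evaluation loss of ~0.6929 sits essentially at ln 2 ≈ 0.6931, which is what the standard sigmoid DPO objective yields when the chosen/rejected reward margin is near zero; the rewards reported by TRL's DPOTrainer are already β-scaled log-probability ratios against the reference model, so the loss can be reproduced from the margin alone. A minimal sketch of that relationship (the β value and loss variant actually used for this run are not visible in this diff):

```python
import math

# Standard sigmoid DPO loss for one preference pair (TRL's DPOTrainer convention):
#   reward_chosen   = beta * (logp_policy(chosen)   - logp_ref(chosen))
#   reward_rejected = beta * (logp_policy(rejected) - logp_ref(rejected))
#   loss = -log(sigmoid(reward_chosen - reward_rejected))
def dpo_loss(reward_chosen: float, reward_rejected: float) -> float:
    margin = reward_chosen - reward_rejected
    return -math.log(1.0 / (1.0 + math.exp(-margin)))

# Final eval rewards from the updated card (margins are tiny):
print(dpo_loss(-0.0003, -0.0007))  # ~0.6929, matching the reported eval loss
print(dpo_loss(0.0, 0.0))          # exactly ln(2): no preference signal at all
```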
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b62113b40e07a216428627da02a746dc9cd8cf45c85cdf8f158153476744aada
+ oid sha256:c85c938a116836c2be173e9bdc671f7190ee519e02c0173d3562a6d2d1044243
  size 176183216
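
Only the PEFT adapter weights change in this commit (same 176 MB size, new SHA-256); the base model NbAiLab/nb-gpt-j-6B-v2 is untouched. A hedged sketch of running the adapter on top of the base model with peft and transformers — the adapter repo id below is inferred from the model name in the card and may differ:

```python
# Sketch: attach the DPO-trained PEFT adapter to the base model for inference.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "NbAiLab/nb-gpt-j-6B-v2"
adapter_id = "hugodk-sch/aftonposten-6b-align-scan"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(
    base_id, torch_dtype=torch.float16, device_map="auto"
)
model = PeftModel.from_pretrained(base, adapter_id)  # loads adapter_model.safetensors

inputs = tokenizer("Oslo er", return_tensors="pt").to(base.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=30)[0]))
```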
all_results.json CHANGED
@@ -1,21 +1,8 @@
  {
  "epoch": 1.0,
- "eval_logits/chosen": -2.2439348697662354,
- "eval_logits/rejected": -2.2390594482421875,
- "eval_logps/chosen": -34.016693115234375,
- "eval_logps/rejected": -37.499900817871094,
- "eval_loss": 0.9991074204444885,
- "eval_rewards/accuracies": 0.5365448594093323,
- "eval_rewards/chosen": 0.01607290655374527,
- "eval_rewards/margins": 0.0010251295752823353,
- "eval_rewards/rejected": 0.015047776512801647,
- "eval_runtime": 146.1502,
- "eval_samples": 343,
- "eval_samples_per_second": 2.347,
- "eval_steps_per_second": 0.294,
- "train_loss": 0.21873634759481853,
- "train_runtime": 627.4596,
+ "train_loss": 0.6918259131443965,
+ "train_runtime": 3249.6401,
  "train_samples": 3079,
- "train_samples_per_second": 4.907,
- "train_steps_per_second": 0.614
+ "train_samples_per_second": 0.947,
+ "train_steps_per_second": 0.118
  }
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
  "epoch": 1.0,
- "train_loss": 0.21873634759481853,
- "train_runtime": 627.4596,
+ "train_loss": 0.6918259131443965,
+ "train_runtime": 3249.6401,
  "train_samples": 3079,
- "train_samples_per_second": 4.907,
- "train_steps_per_second": 0.614
+ "train_samples_per_second": 0.947,
+ "train_steps_per_second": 0.118
  }
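
The new throughput figures in train_results.json and all_results.json are consistent with the longer runtime and the 385 optimizer steps logged in trainer_state.json; a quick sanity check:

```python
# Cross-check the reported throughput against the raw counts in this commit.
train_samples = 3079
train_runtime = 3249.6401   # seconds
train_steps = 385           # final "step" in trainer_state.json

print(round(train_samples / train_runtime, 3))  # 0.947 samples/s (matches)
print(round(train_steps / train_runtime, 3))    # 0.118 steps/s  (matches)
```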
trainer_state.json CHANGED
@@ -10,12 +10,12 @@
10
  "log_history": [
11
  {
12
  "epoch": 0.0,
13
- "learning_rate": 1.282051282051282e-08,
14
  "logits/chosen": -1.7278180122375488,
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
- "loss": 1.0,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
@@ -24,592 +24,592 @@
24
  },
25
  {
26
  "epoch": 0.03,
27
- "learning_rate": 1.2820512820512818e-07,
28
- "logits/chosen": -1.8661272525787354,
29
- "logits/rejected": -1.8704373836517334,
30
- "logps/chosen": -36.993247985839844,
31
- "logps/rejected": -33.67000961303711,
32
- "loss": 0.952,
33
- "rewards/accuracies": 0.5138888955116272,
34
- "rewards/chosen": 0.012012440711259842,
35
- "rewards/margins": 0.047955650836229324,
36
- "rewards/rejected": -0.035943206399679184,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
- "learning_rate": 2.5641025641025636e-07,
42
- "logits/chosen": -1.9975776672363281,
43
- "logits/rejected": -2.000213146209717,
44
- "logps/chosen": -29.647293090820312,
45
- "logps/rejected": -29.056921005249023,
46
- "loss": 1.0105,
47
- "rewards/accuracies": 0.5,
48
- "rewards/chosen": -0.00459072832018137,
49
- "rewards/margins": -0.010462107136845589,
50
- "rewards/rejected": 0.005871377885341644,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
- "learning_rate": 3.8461538461538463e-07,
56
- "logits/chosen": -1.920013189315796,
57
- "logits/rejected": -1.917337417602539,
58
- "logps/chosen": -31.4055233001709,
59
- "logps/rejected": -33.22685623168945,
60
- "loss": 0.9829,
61
- "rewards/accuracies": 0.5625,
62
- "rewards/chosen": 0.009516814723610878,
63
- "rewards/margins": 0.01707320474088192,
64
- "rewards/rejected": -0.007556394673883915,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
- "learning_rate": 4.999896948438433e-07,
70
- "logits/chosen": -2.0167124271392822,
71
- "logits/rejected": -2.0079712867736816,
72
- "logps/chosen": -32.590545654296875,
73
- "logps/rejected": -32.499671936035156,
74
- "loss": 1.0241,
75
- "rewards/accuracies": 0.4375,
76
- "rewards/chosen": -0.01236567273736,
77
- "rewards/margins": -0.024063030257821083,
78
- "rewards/rejected": 0.011697360314428806,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
- "learning_rate": 4.987541037542186e-07,
84
- "logits/chosen": -1.8641105890274048,
85
- "logits/rejected": -1.8533456325531006,
86
- "logps/chosen": -33.56208038330078,
87
- "logps/rejected": -35.437400817871094,
88
- "loss": 1.0176,
89
- "rewards/accuracies": 0.4375,
90
- "rewards/chosen": -0.004139312542974949,
91
- "rewards/margins": -0.017586929723620415,
92
- "rewards/rejected": 0.013447612524032593,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
- "learning_rate": 4.954691471941118e-07,
98
- "logits/chosen": -1.9452192783355713,
99
- "logits/rejected": -1.9471514225006104,
100
- "logps/chosen": -32.587867736816406,
101
- "logps/rejected": -33.20810317993164,
102
- "loss": 0.9559,
103
- "rewards/accuracies": 0.5874999761581421,
104
- "rewards/chosen": 0.01108462829142809,
105
- "rewards/margins": 0.04413585737347603,
106
- "rewards/rejected": -0.033051230013370514,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
- "learning_rate": 4.901618883413548e-07,
112
- "logits/chosen": -2.0798697471618652,
113
- "logits/rejected": -2.084836483001709,
114
- "logps/chosen": -33.99953079223633,
115
- "logps/rejected": -36.59130859375,
116
- "loss": 0.9897,
117
- "rewards/accuracies": 0.4749999940395355,
118
- "rewards/chosen": -0.009940117597579956,
119
- "rewards/margins": 0.010251833125948906,
120
- "rewards/rejected": -0.020191945135593414,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
- "learning_rate": 4.828760511501322e-07,
126
- "logits/chosen": -1.9422235488891602,
127
- "logits/rejected": -1.9453849792480469,
128
- "logps/chosen": -34.411861419677734,
129
- "logps/rejected": -34.599830627441406,
130
- "loss": 0.979,
131
- "rewards/accuracies": 0.5625,
132
- "rewards/chosen": 0.014353242702782154,
133
- "rewards/margins": 0.02102992869913578,
134
- "rewards/rejected": -0.006676685996353626,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
- "learning_rate": 4.736716601303429e-07,
140
- "logits/chosen": -1.950791358947754,
141
- "logits/rejected": -1.9553003311157227,
142
- "logps/chosen": -32.45661163330078,
143
- "logps/rejected": -32.35947799682617,
144
- "loss": 0.9898,
145
- "rewards/accuracies": 0.5375000238418579,
146
- "rewards/chosen": 0.009730497375130653,
147
- "rewards/margins": 0.010243801400065422,
148
- "rewards/rejected": -0.0005133040249347687,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
- "learning_rate": 4.62624545834521e-07,
154
- "logits/chosen": -2.0489749908447266,
155
- "logits/rejected": -2.046968460083008,
156
- "logps/chosen": -32.233360290527344,
157
- "logps/rejected": -31.278451919555664,
158
- "loss": 0.9823,
159
- "rewards/accuracies": 0.512499988079071,
160
- "rewards/chosen": 0.006729811429977417,
161
- "rewards/margins": 0.01774778962135315,
162
- "rewards/rejected": -0.011017980054020882,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
- "eval_logits/chosen": -2.243974447250366,
168
- "eval_logits/rejected": -2.239091634750366,
169
- "eval_logps/chosen": -34.021610260009766,
170
- "eval_logps/rejected": -37.50578689575195,
171
- "eval_loss": 0.9981797933578491,
172
- "eval_rewards/accuracies": 0.5103820562362671,
173
- "eval_rewards/chosen": 0.011647163890302181,
174
- "eval_rewards/margins": 0.0018998426385223866,
175
- "eval_rewards/rejected": 0.009747318923473358,
176
- "eval_runtime": 146.0735,
177
- "eval_samples_per_second": 2.348,
178
- "eval_steps_per_second": 0.294,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
- "learning_rate": 4.4982572012636904e-07,
184
- "logits/chosen": -2.0054612159729004,
185
- "logits/rejected": -2.003042221069336,
186
- "logps/chosen": -33.2550048828125,
187
- "logps/rejected": -34.02202224731445,
188
- "loss": 1.0167,
189
- "rewards/accuracies": 0.44999998807907104,
190
- "rewards/chosen": -0.010068392381072044,
191
- "rewards/margins": -0.016683781519532204,
192
- "rewards/rejected": 0.006615391466766596,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
- "learning_rate": 4.353806263777677e-07,
198
- "logits/chosen": -2.016511917114258,
199
- "logits/rejected": -2.008146047592163,
200
- "logps/chosen": -32.45395278930664,
201
- "logps/rejected": -32.17681121826172,
202
- "loss": 0.9944,
203
- "rewards/accuracies": 0.5249999761581421,
204
- "rewards/chosen": -0.008141009137034416,
205
- "rewards/margins": 0.00563241308555007,
206
- "rewards/rejected": -0.013773423619568348,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
- "learning_rate": 4.194082707715275e-07,
212
- "logits/chosen": -2.0463106632232666,
213
- "logits/rejected": -2.0382590293884277,
214
- "logps/chosen": -30.476207733154297,
215
- "logps/rejected": -32.020484924316406,
216
- "loss": 1.0297,
217
- "rewards/accuracies": 0.3499999940395355,
218
- "rewards/chosen": -0.00044600124238058925,
219
- "rewards/margins": -0.029714178293943405,
220
- "rewards/rejected": 0.02926817536354065,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
- "learning_rate": 4.020402418666621e-07,
226
- "logits/chosen": -1.9767357110977173,
227
- "logits/rejected": -1.986999273300171,
228
- "logps/chosen": -31.381155014038086,
229
- "logps/rejected": -32.54108428955078,
230
- "loss": 0.9598,
231
- "rewards/accuracies": 0.5375000238418579,
232
- "rewards/chosen": 0.034655503928661346,
233
- "rewards/margins": 0.04024083539843559,
234
- "rewards/rejected": -0.005585335195064545,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
- "learning_rate": 3.8341962650351185e-07,
240
- "logits/chosen": -1.8906068801879883,
241
- "logits/rejected": -1.8916860818862915,
242
- "logps/chosen": -34.20075225830078,
243
- "logps/rejected": -34.7618522644043,
244
- "loss": 1.0031,
245
- "rewards/accuracies": 0.4124999940395355,
246
- "rewards/chosen": -0.007252261973917484,
247
- "rewards/margins": -0.0031175222247838974,
248
- "rewards/rejected": -0.004134741611778736,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
- "learning_rate": 3.636998309800572e-07,
254
- "logits/chosen": -1.942251443862915,
255
- "logits/rejected": -1.9387743473052979,
256
- "logps/chosen": -36.145790100097656,
257
- "logps/rejected": -32.73196029663086,
258
- "loss": 0.9684,
259
- "rewards/accuracies": 0.512499988079071,
260
- "rewards/chosen": 0.023301539942622185,
261
- "rewards/margins": 0.031586963683366776,
262
- "rewards/rejected": -0.008285422809422016,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
- "learning_rate": 3.430433172111807e-07,
268
- "logits/chosen": -2.041952610015869,
269
- "logits/rejected": -2.0345559120178223,
270
- "logps/chosen": -33.81856155395508,
271
- "logps/rejected": -31.359323501586914,
272
- "loss": 1.0119,
273
- "rewards/accuracies": 0.4749999940395355,
274
- "rewards/chosen": -0.018632039427757263,
275
- "rewards/margins": -0.011905943043529987,
276
- "rewards/rejected": -0.006726093590259552,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
- "learning_rate": 3.216202642830543e-07,
282
- "logits/chosen": -2.0473170280456543,
283
- "logits/rejected": -2.0525729656219482,
284
- "logps/chosen": -32.52764129638672,
285
- "logps/rejected": -32.50192642211914,
286
- "loss": 0.9695,
287
- "rewards/accuracies": 0.574999988079071,
288
- "rewards/chosen": 0.015429094433784485,
289
- "rewards/margins": 0.030534446239471436,
290
- "rewards/rejected": -0.015105349011719227,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
- "learning_rate": 2.9960716642946403e-07,
296
- "logits/chosen": -2.048530340194702,
297
- "logits/rejected": -2.045767307281494,
298
- "logps/chosen": -31.485219955444336,
299
- "logps/rejected": -31.34262466430664,
300
- "loss": 0.9747,
301
- "rewards/accuracies": 0.5874999761581421,
302
- "rewards/chosen": 0.003360496135428548,
303
- "rewards/margins": 0.02533094584941864,
304
- "rewards/rejected": -0.02197045087814331,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
- "learning_rate": 2.771853789806683e-07,
310
- "logits/chosen": -1.918745756149292,
311
- "logits/rejected": -1.9234097003936768,
312
- "logps/chosen": -31.576425552368164,
313
- "logps/rejected": -32.79480743408203,
314
- "loss": 0.9696,
315
- "rewards/accuracies": 0.5375000238418579,
316
- "rewards/chosen": 0.026870254427194595,
317
- "rewards/margins": 0.03035963699221611,
318
- "rewards/rejected": -0.0034893869888037443,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
- "eval_logits/chosen": -2.243912696838379,
324
- "eval_logits/rejected": -2.2390317916870117,
325
- "eval_logps/chosen": -34.021690368652344,
326
- "eval_logps/rejected": -37.502418518066406,
327
- "eval_loss": 1.001371145248413,
328
- "eval_rewards/accuracies": 0.4692690968513489,
329
- "eval_rewards/chosen": 0.011576101183891296,
330
- "eval_rewards/margins": -0.0012021720176562667,
331
- "eval_rewards/rejected": 0.012778270989656448,
332
- "eval_runtime": 145.8493,
333
- "eval_samples_per_second": 2.352,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
- "learning_rate": 2.5453962426402e-07,
340
- "logits/chosen": -2.0318515300750732,
341
- "logits/rejected": -2.0425400733947754,
342
- "logps/chosen": -31.96026039123535,
343
- "logps/rejected": -33.8691291809082,
344
- "loss": 0.9843,
345
- "rewards/accuracies": 0.625,
346
- "rewards/chosen": 0.009936656802892685,
347
- "rewards/margins": 0.015721982344985008,
348
- "rewards/rejected": -0.005785329267382622,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
- "learning_rate": 2.318564697655179e-07,
354
- "logits/chosen": -1.9256188869476318,
355
- "logits/rejected": -1.9404585361480713,
356
- "logps/chosen": -30.080001831054688,
357
- "logps/rejected": -31.53633689880371,
358
- "loss": 0.9913,
359
- "rewards/accuracies": 0.512499988079071,
360
- "rewards/chosen": 0.028384273871779442,
361
- "rewards/margins": 0.00873211957514286,
362
- "rewards/rejected": 0.01965215802192688,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
- "learning_rate": 2.093227910899832e-07,
368
- "logits/chosen": -1.9830671548843384,
369
- "logits/rejected": -1.9870269298553467,
370
- "logps/chosen": -33.386474609375,
371
- "logps/rejected": -31.55719566345215,
372
- "loss": 0.965,
373
- "rewards/accuracies": 0.5249999761581421,
374
- "rewards/chosen": 0.02789575420320034,
375
- "rewards/margins": 0.03499894589185715,
376
- "rewards/rejected": -0.007103185169398785,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
- "learning_rate": 1.8712423238279356e-07,
382
- "logits/chosen": -1.9833042621612549,
383
- "logits/rejected": -1.96132493019104,
384
- "logps/chosen": -34.13536834716797,
385
- "logps/rejected": -34.952049255371094,
386
- "loss": 0.9823,
387
- "rewards/accuracies": 0.5375000238418579,
388
- "rewards/chosen": 0.016542484983801842,
389
- "rewards/margins": 0.01767394319176674,
390
- "rewards/rejected": -0.0011314575094729662,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
- "learning_rate": 1.654436768970182e-07,
396
- "logits/chosen": -2.0247886180877686,
397
- "logits/rejected": -2.0214812755584717,
398
- "logps/chosen": -32.913856506347656,
399
- "logps/rejected": -36.23151397705078,
400
- "loss": 0.984,
401
- "rewards/accuracies": 0.5625,
402
- "rewards/chosen": 0.010092089883983135,
403
- "rewards/margins": 0.016022875905036926,
404
- "rewards/rejected": -0.0059307836927473545,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
- "learning_rate": 1.444597403062196e-07,
410
- "logits/chosen": -1.8914467096328735,
411
- "logits/rejected": -1.8890043497085571,
412
- "logps/chosen": -34.18690872192383,
413
- "logps/rejected": -35.524742126464844,
414
- "loss": 0.9806,
415
- "rewards/accuracies": 0.5375000238418579,
416
- "rewards/chosen": 0.0013398710871115327,
417
- "rewards/margins": 0.019445054233074188,
418
- "rewards/rejected": -0.01810518093407154,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
- "learning_rate": 1.2434529917578887e-07,
424
- "logits/chosen": -1.8761389255523682,
425
- "logits/rejected": -1.8735923767089844,
426
- "logps/chosen": -34.396785736083984,
427
- "logps/rejected": -31.74139976501465,
428
- "loss": 1.0262,
429
- "rewards/accuracies": 0.4000000059604645,
430
- "rewards/chosen": 0.000734941684640944,
431
- "rewards/margins": -0.026202131062746048,
432
- "rewards/rejected": 0.02693706750869751,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
- "learning_rate": 1.0526606671603521e-07,
438
- "logits/chosen": -1.9801757335662842,
439
- "logits/rejected": -1.9695546627044678,
440
- "logps/chosen": -35.34246063232422,
441
- "logps/rejected": -31.823230743408203,
442
- "loss": 1.0032,
443
- "rewards/accuracies": 0.4749999940395355,
444
- "rewards/chosen": 0.012763315811753273,
445
- "rewards/margins": -0.003222113475203514,
446
- "rewards/rejected": 0.015985429286956787,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
- "learning_rate": 8.737922755071453e-08,
452
- "logits/chosen": -2.075316905975342,
453
- "logits/rejected": -2.0602917671203613,
454
- "logps/chosen": -30.897375106811523,
455
- "logps/rejected": -32.64093780517578,
456
- "loss": 0.9857,
457
- "rewards/accuracies": 0.5249999761581421,
458
- "rewards/chosen": 0.024072837084531784,
459
- "rewards/margins": 0.014264687895774841,
460
- "rewards/rejected": 0.009808152914047241,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
- "learning_rate": 7.08321427484816e-08,
466
- "logits/chosen": -1.946758508682251,
467
- "logits/rejected": -1.9442226886749268,
468
- "logps/chosen": -32.89502716064453,
469
- "logps/rejected": -30.831274032592773,
470
- "loss": 0.9581,
471
- "rewards/accuracies": 0.5625,
472
- "rewards/chosen": 0.0288736280053854,
473
- "rewards/margins": 0.04189068451523781,
474
- "rewards/rejected": -0.013017055578529835,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
- "eval_logits/chosen": -2.2437047958374023,
480
- "eval_logits/rejected": -2.2388339042663574,
481
- "eval_logps/chosen": -34.03062438964844,
482
- "eval_logps/rejected": -37.499412536621094,
483
- "eval_loss": 1.0120128393173218,
484
- "eval_rewards/accuracies": 0.472591370344162,
485
- "eval_rewards/chosen": 0.003532935632392764,
486
- "eval_rewards/margins": -0.011953731998801231,
487
- "eval_rewards/rejected": 0.015486669726669788,
488
- "eval_runtime": 145.7744,
489
  "eval_samples_per_second": 2.353,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
- "learning_rate": 5.576113578589034e-08,
496
- "logits/chosen": -1.928391695022583,
497
- "logits/rejected": -1.925132393836975,
498
- "logps/chosen": -31.573719024658203,
499
- "logps/rejected": -33.733333587646484,
500
- "loss": 0.9813,
501
- "rewards/accuracies": 0.574999988079071,
502
- "rewards/chosen": 0.026669049635529518,
503
- "rewards/margins": 0.01873381994664669,
504
- "rewards/rejected": 0.007935228757560253,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
- "learning_rate": 4.229036944380912e-08,
510
- "logits/chosen": -1.9808565378189087,
511
- "logits/rejected": -1.9685465097427368,
512
- "logps/chosen": -34.569026947021484,
513
- "logps/rejected": -33.5385856628418,
514
- "loss": 0.9759,
515
- "rewards/accuracies": 0.5874999761581421,
516
- "rewards/chosen": 0.02433854714035988,
517
- "rewards/margins": 0.024108227342367172,
518
- "rewards/rejected": 0.00023032091849017888,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
- "learning_rate": 3.053082288996112e-08,
524
- "logits/chosen": -2.0160164833068848,
525
- "logits/rejected": -2.0145697593688965,
526
- "logps/chosen": -33.470794677734375,
527
- "logps/rejected": -32.465301513671875,
528
- "loss": 0.9994,
529
- "rewards/accuracies": 0.5,
530
- "rewards/chosen": 0.013503548689186573,
531
- "rewards/margins": 0.0006495133275166154,
532
- "rewards/rejected": 0.012854036875069141,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
- "learning_rate": 2.05793773749158e-08,
538
- "logits/chosen": -2.1030516624450684,
539
- "logits/rejected": -2.087252140045166,
540
- "logps/chosen": -34.154754638671875,
541
- "logps/rejected": -33.087013244628906,
542
- "loss": 1.0057,
543
- "rewards/accuracies": 0.512499988079071,
544
- "rewards/chosen": 0.02217467688024044,
545
- "rewards/margins": -0.005742859095335007,
546
- "rewards/rejected": 0.027917543426156044,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
- "learning_rate": 1.251801807404168e-08,
552
- "logits/chosen": -1.9748103618621826,
553
- "logits/rejected": -1.9738750457763672,
554
- "logps/chosen": -33.23518371582031,
555
- "logps/rejected": -32.46066665649414,
556
- "loss": 0.9618,
557
- "rewards/accuracies": 0.574999988079071,
558
- "rewards/chosen": 0.04222174361348152,
559
- "rewards/margins": 0.03818847984075546,
560
- "rewards/rejected": 0.004033264704048634,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
- "learning_rate": 6.41315865106129e-09,
566
- "logits/chosen": -1.9303611516952515,
567
- "logits/rejected": -1.940708875656128,
568
- "logps/chosen": -32.200523376464844,
569
- "logps/rejected": -35.27861785888672,
570
- "loss": 1.0054,
571
- "rewards/accuracies": 0.42500001192092896,
572
- "rewards/chosen": 0.0037514485884457827,
573
- "rewards/margins": -0.005373493768274784,
574
- "rewards/rejected": 0.009124943986535072,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
- "learning_rate": 2.3150941078050324e-09,
580
- "logits/chosen": -2.069342613220215,
581
- "logits/rejected": -2.062788486480713,
582
- "logps/chosen": -33.65803527832031,
583
- "logps/rejected": -29.199758529663086,
584
- "loss": 1.0295,
585
- "rewards/accuracies": 0.5,
586
- "rewards/chosen": -0.005459528882056475,
587
- "rewards/margins": -0.029461050406098366,
588
- "rewards/rejected": 0.02400151826441288,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
- "learning_rate": 2.575864278703266e-10,
594
- "logits/chosen": -1.9290788173675537,
595
- "logits/rejected": -1.931239366531372,
596
- "logps/chosen": -34.229915618896484,
597
- "logps/rejected": -30.90093994140625,
598
- "loss": 0.9663,
599
- "rewards/accuracies": 0.5625,
600
- "rewards/chosen": 0.00822196900844574,
601
- "rewards/margins": 0.03370029479265213,
602
- "rewards/rejected": -0.02547832392156124,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
- "train_loss": 0.21873634759481853,
610
- "train_runtime": 627.4596,
611
- "train_samples_per_second": 4.907,
612
- "train_steps_per_second": 0.614
613
  }
614
  ],
615
  "logging_steps": 10,
 
10
  "log_history": [
11
  {
12
  "epoch": 0.0,
13
+ "learning_rate": 1.282051282051282e-07,
14
  "logits/chosen": -1.7278180122375488,
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
+ "loss": 0.6931,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
 
24
  },
25
  {
26
  "epoch": 0.03,
27
+ "learning_rate": 1.282051282051282e-06,
28
+ "logits/chosen": -1.8666962385177612,
29
+ "logits/rejected": -1.8709977865219116,
30
+ "logps/chosen": -36.98939514160156,
31
+ "logps/rejected": -33.66963195800781,
32
+ "loss": 0.6929,
33
+ "rewards/accuracies": 0.5694444179534912,
34
+ "rewards/chosen": 0.00017197892884723842,
35
+ "rewards/margins": 0.0005675320862792432,
36
+ "rewards/rejected": -0.0003955531574320048,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
+ "learning_rate": 2.564102564102564e-06,
42
+ "logits/chosen": -1.9978935718536377,
43
+ "logits/rejected": -2.000532627105713,
44
+ "logps/chosen": -29.66562843322754,
45
+ "logps/rejected": -29.045883178710938,
46
+ "loss": 0.6934,
47
+ "rewards/accuracies": 0.36250001192092896,
48
+ "rewards/chosen": -0.00023434234026353806,
49
+ "rewards/margins": -0.0004099405778106302,
50
+ "rewards/rejected": 0.0001755982666509226,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
+ "learning_rate": 3.846153846153847e-06,
56
+ "logits/chosen": -1.9211324453353882,
57
+ "logits/rejected": -1.9184545278549194,
58
+ "logps/chosen": -31.41294288635254,
59
+ "logps/rejected": -33.23053741455078,
60
+ "loss": 0.6931,
61
+ "rewards/accuracies": 0.5249999761581421,
62
+ "rewards/chosen": 3.152530553052202e-05,
63
+ "rewards/margins": 0.000152341352077201,
64
+ "rewards/rejected": -0.00012081606837455183,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
+ "learning_rate": 4.999896948438434e-06,
70
+ "logits/chosen": -2.017341375350952,
71
+ "logits/rejected": -2.0086092948913574,
72
+ "logps/chosen": -32.60146713256836,
73
+ "logps/rejected": -32.49399185180664,
74
+ "loss": 0.6934,
75
+ "rewards/accuracies": 0.4124999940395355,
76
+ "rewards/chosen": -0.0002466029836796224,
77
+ "rewards/margins": -0.0004333632532507181,
78
+ "rewards/rejected": 0.00018676018225960433,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
+ "learning_rate": 4.987541037542187e-06,
84
+ "logits/chosen": -1.862633466720581,
85
+ "logits/rejected": -1.8518692255020142,
86
+ "logps/chosen": -33.55931091308594,
87
+ "logps/rejected": -35.44870376586914,
88
+ "loss": 0.6932,
89
+ "rewards/accuracies": 0.5,
90
+ "rewards/chosen": -1.831089502957184e-05,
91
+ "rewards/margins": -5.47249146620743e-05,
92
+ "rewards/rejected": 3.641402145149186e-05,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
+ "learning_rate": 4.954691471941119e-06,
98
+ "logits/chosen": -1.9409154653549194,
99
+ "logits/rejected": -1.9428699016571045,
100
+ "logps/chosen": -32.53916549682617,
101
+ "logps/rejected": -33.24130630493164,
102
+ "loss": 0.6925,
103
+ "rewards/accuracies": 0.612500011920929,
104
+ "rewards/chosen": 0.0006101715262047946,
105
+ "rewards/margins": 0.0013094183523207903,
106
+ "rewards/rejected": -0.0006992466514930129,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
+ "learning_rate": 4.901618883413549e-06,
112
+ "logits/chosen": -2.070591926574707,
113
+ "logits/rejected": -2.075544834136963,
114
+ "logps/chosen": -34.023067474365234,
115
+ "logps/rejected": -36.647151947021484,
116
+ "loss": 0.6929,
117
+ "rewards/accuracies": 0.5375000238418579,
118
+ "rewards/chosen": -0.00034581663203425705,
119
+ "rewards/margins": 0.0004369783273432404,
120
+ "rewards/rejected": -0.0007827949011698365,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
+ "learning_rate": 4.828760511501322e-06,
126
+ "logits/chosen": -1.9308092594146729,
127
+ "logits/rejected": -1.933943748474121,
128
+ "logps/chosen": -34.318023681640625,
129
+ "logps/rejected": -34.67802429199219,
130
+ "loss": 0.6922,
131
+ "rewards/accuracies": 0.5375000238418579,
132
+ "rewards/chosen": 0.0010978971840813756,
133
+ "rewards/margins": 0.0019540609791874886,
134
+ "rewards/rejected": -0.000856163795106113,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
+ "learning_rate": 4.7367166013034295e-06,
140
+ "logits/chosen": -1.9380912780761719,
141
+ "logits/rejected": -1.9425855875015259,
142
+ "logps/chosen": -32.38385009765625,
143
+ "logps/rejected": -32.35346603393555,
144
+ "loss": 0.6928,
145
+ "rewards/accuracies": 0.637499988079071,
146
+ "rewards/chosen": 0.0008357145707122982,
147
+ "rewards/margins": 0.0007813175907358527,
148
+ "rewards/rejected": 5.439693995867856e-05,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
+ "learning_rate": 4.626245458345211e-06,
154
+ "logits/chosen": -2.035137891769409,
155
+ "logits/rejected": -2.0331528186798096,
156
+ "logps/chosen": -32.112831115722656,
157
+ "logps/rejected": -31.29166030883789,
158
+ "loss": 0.6924,
159
+ "rewards/accuracies": 0.625,
160
+ "rewards/chosen": 0.0012800416443496943,
161
+ "rewards/margins": 0.0015345367137342691,
162
+ "rewards/rejected": -0.0002544948656577617,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
+ "eval_logits/chosen": -2.2300801277160645,
168
+ "eval_logits/rejected": -2.225238084793091,
169
+ "eval_logps/chosen": -34.04683303833008,
170
+ "eval_logps/rejected": -37.53927230834961,
171
+ "eval_loss": 0.6930972337722778,
172
+ "eval_rewards/accuracies": 0.5186877250671387,
173
+ "eval_rewards/chosen": -0.00012280470400583,
174
+ "eval_rewards/margins": 0.00010372586984885857,
175
+ "eval_rewards/rejected": -0.0002265305956825614,
176
+ "eval_runtime": 145.7259,
177
+ "eval_samples_per_second": 2.354,
178
+ "eval_steps_per_second": 0.295,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
+ "learning_rate": 4.498257201263691e-06,
184
+ "logits/chosen": -1.989782691001892,
185
+ "logits/rejected": -1.9873950481414795,
186
+ "logps/chosen": -33.12385559082031,
187
+ "logps/rejected": -34.011810302734375,
188
+ "loss": 0.6926,
189
+ "rewards/accuracies": 0.612500011920929,
190
+ "rewards/chosen": 0.0011996207758784294,
191
+ "rewards/margins": 0.001024017808958888,
192
+ "rewards/rejected": 0.00017560287960805,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
+ "learning_rate": 4.353806263777678e-06,
198
+ "logits/chosen": -2.0008151531219482,
199
+ "logits/rejected": -1.992500901222229,
200
+ "logps/chosen": -32.320838928222656,
201
+ "logps/rejected": -32.128170013427734,
202
+ "loss": 0.6927,
203
+ "rewards/accuracies": 0.512499988079071,
204
+ "rewards/chosen": 0.001240686746314168,
205
+ "rewards/margins": 0.0009073130786418915,
206
+ "rewards/rejected": 0.0003333735803607851,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
+ "learning_rate": 4.1940827077152755e-06,
212
+ "logits/chosen": -2.0289230346679688,
213
+ "logits/rejected": -2.020946502685547,
214
+ "logps/chosen": -30.313907623291016,
215
+ "logps/rejected": -32.086116790771484,
216
+ "loss": 0.6922,
217
+ "rewards/accuracies": 0.6499999761581421,
218
+ "rewards/chosen": 0.0016180993989109993,
219
+ "rewards/margins": 0.0019491963321343064,
220
+ "rewards/rejected": -0.00033109664218500257,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
+ "learning_rate": 4.0204024186666215e-06,
226
+ "logits/chosen": -1.9592479467391968,
227
+ "logits/rejected": -1.9694607257843018,
228
+ "logps/chosen": -31.223953247070312,
229
+ "logps/rejected": -32.547454833984375,
230
+ "loss": 0.6921,
231
+ "rewards/accuracies": 0.6499999761581421,
232
+ "rewards/chosen": 0.0019570994190871716,
233
+ "rewards/margins": 0.002082846825942397,
234
+ "rewards/rejected": -0.00012574761058203876,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
+ "learning_rate": 3.834196265035119e-06,
240
+ "logits/chosen": -1.8708124160766602,
241
+ "logits/rejected": -1.8719879388809204,
242
+ "logps/chosen": -33.877174377441406,
243
+ "logps/rejected": -34.78774642944336,
244
+ "loss": 0.6914,
245
+ "rewards/accuracies": 0.637499988079071,
246
+ "rewards/chosen": 0.003155181184411049,
247
+ "rewards/margins": 0.0034600873477756977,
248
+ "rewards/rejected": -0.00030490627977997065,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
+ "learning_rate": 3.636998309800573e-06,
254
+ "logits/chosen": -1.9215673208236694,
255
+ "logits/rejected": -1.9181665182113647,
256
+ "logps/chosen": -36.011531829833984,
257
+ "logps/rejected": -32.685707092285156,
258
+ "loss": 0.6925,
259
+ "rewards/accuracies": 0.612500011920929,
260
+ "rewards/chosen": 0.001601455733180046,
261
+ "rewards/margins": 0.001231002388522029,
262
+ "rewards/rejected": 0.00037045328645035625,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
+ "learning_rate": 3.4304331721118078e-06,
268
+ "logits/chosen": -2.021604061126709,
269
+ "logits/rejected": -2.014291524887085,
270
+ "logps/chosen": -33.482086181640625,
271
+ "logps/rejected": -31.404422760009766,
272
+ "loss": 0.6913,
273
+ "rewards/accuracies": 0.7124999761581421,
274
+ "rewards/chosen": 0.0031577465124428272,
275
+ "rewards/margins": 0.003683448536321521,
276
+ "rewards/rejected": -0.0005257020820863545,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
+ "learning_rate": 3.2162026428305436e-06,
282
+ "logits/chosen": -2.027444839477539,
283
+ "logits/rejected": -2.032665729522705,
284
+ "logps/chosen": -32.183101654052734,
285
+ "logps/rejected": -32.39936065673828,
286
+ "loss": 0.6918,
287
+ "rewards/accuracies": 0.6625000238418579,
288
+ "rewards/chosen": 0.0036168727092444897,
289
+ "rewards/margins": 0.0027590212412178516,
290
+ "rewards/rejected": 0.0008578516426496208,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
+ "learning_rate": 2.996071664294641e-06,
296
+ "logits/chosen": -2.027879238128662,
297
+ "logits/rejected": -2.025132656097412,
298
+ "logps/chosen": -31.258464813232422,
299
+ "logps/rejected": -31.348388671875,
300
+ "loss": 0.6919,
301
+ "rewards/accuracies": 0.6625000238418579,
302
+ "rewards/chosen": 0.0023048892617225647,
303
+ "rewards/margins": 0.0026066480204463005,
304
+ "rewards/rejected": -0.000301758642308414,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
+ "learning_rate": 2.7718537898066833e-06,
310
+ "logits/chosen": -1.8983129262924194,
311
+ "logits/rejected": -1.902967095375061,
312
+ "logps/chosen": -31.276391983032227,
313
+ "logps/rejected": -32.81935119628906,
314
+ "loss": 0.6914,
315
+ "rewards/accuracies": 0.7124999761581421,
316
+ "rewards/chosen": 0.0032989257015287876,
317
+ "rewards/margins": 0.00358308176510036,
318
+ "rewards/rejected": -0.00028415597626008093,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
+ "eval_logits/chosen": -2.2249655723571777,
324
+ "eval_logits/rejected": -2.220139503479004,
325
+ "eval_logps/chosen": -34.04255294799805,
326
+ "eval_logps/rejected": -37.55300521850586,
327
+ "eval_loss": 0.6930080056190491,
328
+ "eval_rewards/accuracies": 0.5245016813278198,
329
+ "eval_rewards/chosen": -8.006239659152925e-05,
330
+ "eval_rewards/margins": 0.00028380370349623263,
331
+ "eval_rewards/rejected": -0.00036386612919159234,
332
+ "eval_runtime": 145.5269,
333
+ "eval_samples_per_second": 2.357,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
+ "learning_rate": 2.5453962426402006e-06,
340
+ "logits/chosen": -2.010593891143799,
341
+ "logits/rejected": -2.021207332611084,
342
+ "logps/chosen": -31.7437801361084,
343
+ "logps/rejected": -33.93886947631836,
344
+ "loss": 0.6916,
345
+ "rewards/accuracies": 0.574999988079071,
346
+ "rewards/chosen": 0.0022752191871404648,
347
+ "rewards/margins": 0.003036911366507411,
348
+ "rewards/rejected": -0.000761692295782268,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
+ "learning_rate": 2.3185646976551794e-06,
354
+ "logits/chosen": -1.9029136896133423,
355
+ "logits/rejected": -1.9176632165908813,
356
+ "logps/chosen": -29.78145408630371,
357
+ "logps/rejected": -31.63638687133789,
358
+ "loss": 0.6911,
359
+ "rewards/accuracies": 0.7749999761581421,
360
+ "rewards/chosen": 0.003300876123830676,
361
+ "rewards/margins": 0.0040829661302268505,
362
+ "rewards/rejected": -0.000782089657150209,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
+ "learning_rate": 2.0932279108998323e-06,
368
+ "logits/chosen": -1.9591538906097412,
369
+ "logits/rejected": -1.9631026983261108,
370
+ "logps/chosen": -33.05189895629883,
371
+ "logps/rejected": -31.594707489013672,
372
+ "loss": 0.6911,
373
+ "rewards/accuracies": 0.612500011920929,
374
+ "rewards/chosen": 0.003655704203993082,
375
+ "rewards/margins": 0.004109731875360012,
376
+ "rewards/rejected": -0.00045402703108265996,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
+ "learning_rate": 1.8712423238279358e-06,
382
+ "logits/chosen": -1.9572566747665405,
383
+ "logits/rejected": -1.9354870319366455,
384
+ "logps/chosen": -33.83857727050781,
385
+ "logps/rejected": -35.12303924560547,
386
+ "loss": 0.6907,
387
+ "rewards/accuracies": 0.6875,
388
+ "rewards/chosen": 0.0031517534516751766,
389
+ "rewards/margins": 0.004874187987297773,
390
+ "rewards/rejected": -0.0017224351176992059,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
+ "learning_rate": 1.6544367689701824e-06,
396
+ "logits/chosen": -1.998875379562378,
397
+ "logits/rejected": -1.9955555200576782,
398
+ "logps/chosen": -32.72559356689453,
399
+ "logps/rejected": -36.2435417175293,
400
+ "loss": 0.6921,
401
+ "rewards/accuracies": 0.5874999761581421,
402
+ "rewards/chosen": 0.0019947518594563007,
403
+ "rewards/margins": 0.0021809376776218414,
404
+ "rewards/rejected": -0.0001861859782366082,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
+ "learning_rate": 1.4445974030621963e-06,
410
+ "logits/chosen": -1.8661177158355713,
411
+ "logits/rejected": -1.8636993169784546,
412
+ "logps/chosen": -33.959014892578125,
413
+ "logps/rejected": -35.526344299316406,
414
+ "loss": 0.6919,
415
+ "rewards/accuracies": 0.637499988079071,
416
+ "rewards/chosen": 0.0022938635665923357,
417
+ "rewards/margins": 0.0025111136492341757,
418
+ "rewards/rejected": -0.0002172500389860943,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
+ "learning_rate": 1.243452991757889e-06,
424
+ "logits/chosen": -1.8511241674423218,
425
+ "logits/rejected": -1.8487510681152344,
426
+ "logps/chosen": -34.16337585449219,
427
+ "logps/rejected": -31.830408096313477,
428
+ "loss": 0.6917,
429
+ "rewards/accuracies": 0.625,
430
+ "rewards/chosen": 0.002342230873182416,
431
+ "rewards/margins": 0.0029330006800591946,
432
+ "rewards/rejected": -0.0005907699232921004,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
+ "learning_rate": 1.0526606671603523e-06,
438
+ "logits/chosen": -1.9535648822784424,
439
+ "logits/rejected": -1.943101167678833,
440
+ "logps/chosen": -35.01304244995117,
441
+ "logps/rejected": -31.87521743774414,
442
+ "loss": 0.6913,
443
+ "rewards/accuracies": 0.6625000238418579,
444
+ "rewards/chosen": 0.0034359837882220745,
445
+ "rewards/margins": 0.0037782168947160244,
446
+ "rewards/rejected": -0.0003422332229092717,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
+ "learning_rate": 8.737922755071455e-07,
452
+ "logits/chosen": -2.048783302307129,
453
+ "logits/rejected": -2.0339112281799316,
454
+ "logps/chosen": -30.716812133789062,
455
+ "logps/rejected": -32.62614059448242,
456
+ "loss": 0.6922,
457
+ "rewards/accuracies": 0.5625,
458
+ "rewards/chosen": 0.0020731096155941486,
459
+ "rewards/margins": 0.001816184027120471,
460
+ "rewards/rejected": 0.00025692558847367764,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
+ "learning_rate": 7.08321427484816e-07,
466
+ "logits/chosen": -1.9198474884033203,
467
+ "logits/rejected": -1.917340636253357,
468
+ "logps/chosen": -32.29683303833008,
469
+ "logps/rejected": -30.91409683227539,
470
+ "loss": 0.6895,
471
+ "rewards/accuracies": 0.737500011920929,
472
+ "rewards/chosen": 0.0063027567230165005,
473
+ "rewards/margins": 0.007275627460330725,
474
+ "rewards/rejected": -0.0009728703880682588,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
+ "eval_logits/chosen": -2.221111297607422,
480
+ "eval_logits/rejected": -2.2162926197052,
481
+ "eval_logps/chosen": -34.0648307800293,
482
+ "eval_logps/rejected": -37.58684158325195,
483
+ "eval_loss": 0.6929495930671692,
484
+ "eval_rewards/accuracies": 0.5419435501098633,
485
+ "eval_rewards/chosen": -0.0003027978236787021,
486
+ "eval_rewards/margins": 0.0003993964346591383,
487
+ "eval_rewards/rejected": -0.0007021942874416709,
488
+ "eval_runtime": 145.7415,
489
  "eval_samples_per_second": 2.353,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
+ "learning_rate": 5.576113578589035e-07,
496
+ "logits/chosen": -1.905469298362732,
497
+ "logits/rejected": -1.9022337198257446,
498
+ "logps/chosen": -31.291845321655273,
499
+ "logps/rejected": -33.816497802734375,
500
+ "loss": 0.6912,
501
+ "rewards/accuracies": 0.75,
502
+ "rewards/chosen": 0.003115057712420821,
503
+ "rewards/margins": 0.003858551848679781,
504
+ "rewards/rejected": -0.0007434942526742816,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
+ "learning_rate": 4.229036944380913e-07,
510
+ "logits/chosen": -1.9548442363739014,
511
+ "logits/rejected": -1.9426848888397217,
512
+ "logps/chosen": -34.22846603393555,
513
+ "logps/rejected": -33.660888671875,
514
+ "loss": 0.6907,
515
+ "rewards/accuracies": 0.6625000238418579,
516
+ "rewards/chosen": 0.0036761020310223103,
517
+ "rewards/margins": 0.004896566737443209,
518
+ "rewards/rejected": -0.0012204641243442893,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
+ "learning_rate": 3.053082288996112e-07,
524
+ "logits/chosen": -1.9896023273468018,
525
+ "logits/rejected": -1.9881556034088135,
526
+ "logps/chosen": -33.096466064453125,
527
+ "logps/rejected": -32.550086975097656,
528
+ "loss": 0.6909,
529
+ "rewards/accuracies": 0.7124999761581421,
530
+ "rewards/chosen": 0.0038932852912694216,
531
+ "rewards/margins": 0.004598308354616165,
532
+ "rewards/rejected": -0.0007050234125927091,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
+ "learning_rate": 2.0579377374915805e-07,
538
+ "logits/chosen": -2.0761990547180176,
539
+ "logits/rejected": -2.0605435371398926,
540
+ "logps/chosen": -33.7951774597168,
541
+ "logps/rejected": -33.09763717651367,
542
+ "loss": 0.6913,
543
+ "rewards/accuracies": 0.6499999761581421,
544
+ "rewards/chosen": 0.0038421000353991985,
545
+ "rewards/margins": 0.0036381154786795378,
546
+ "rewards/rejected": 0.00020398428023327142,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
+ "learning_rate": 1.2518018074041684e-07,
552
+ "logits/chosen": -1.9492791891098022,
553
+ "logits/rejected": -1.9484554529190063,
554
+ "logps/chosen": -32.777854919433594,
555
+ "logps/rejected": -32.48105239868164,
556
+ "loss": 0.6906,
557
+ "rewards/accuracies": 0.6625000238418579,
558
+ "rewards/chosen": 0.005042382050305605,
559
+ "rewards/margins": 0.00520141888409853,
560
+ "rewards/rejected": -0.00015903706662356853,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
+ "learning_rate": 6.41315865106129e-08,
566
+ "logits/chosen": -1.9036123752593994,
567
+ "logits/rejected": -1.9138826131820679,
568
+ "logps/chosen": -31.845821380615234,
569
+ "logps/rejected": -35.32244873046875,
570
+ "loss": 0.6912,
571
+ "rewards/accuracies": 0.6875,
572
+ "rewards/chosen": 0.003588717896491289,
573
+ "rewards/margins": 0.003925635013729334,
574
+ "rewards/rejected": -0.00033691702992655337,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
+ "learning_rate": 2.3150941078050325e-08,
580
+ "logits/chosen": -2.043790102005005,
581
+ "logits/rejected": -2.0373566150665283,
582
+ "logps/chosen": -33.307098388671875,
583
+ "logps/rejected": -29.227703094482422,
584
+ "loss": 0.6914,
585
+ "rewards/accuracies": 0.6875,
586
+ "rewards/chosen": 0.003448712872341275,
587
+ "rewards/margins": 0.0034615055192261934,
588
+ "rewards/rejected": -1.2792841516784392e-05,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
+ "learning_rate": 2.575864278703266e-09,
594
+ "logits/chosen": -1.9043537378311157,
595
+ "logits/rejected": -1.9065628051757812,
596
+ "logps/chosen": -33.83999252319336,
597
+ "logps/rejected": -30.96780776977539,
598
+ "loss": 0.6907,
599
+ "rewards/accuracies": 0.7124999761581421,
600
+ "rewards/chosen": 0.003990581724792719,
601
+ "rewards/margins": 0.004942373372614384,
602
+ "rewards/rejected": -0.0009517916478216648,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
+ "train_loss": 0.6918259131443965,
610
+ "train_runtime": 3249.6401,
611
+ "train_samples_per_second": 0.947,
612
+ "train_steps_per_second": 0.118
613
  }
614
  ],
615
  "logging_steps": 10,