hugodk-sch committed on
Commit 592d91c
1 Parent(s): 085b079

Model save

Files changed (5)
  1. README.md +13 -16
  2. adapter_model.safetensors +1 -1
  3. all_results.json +2 -15
  4. train_results.json +2 -2
  5. trainer_state.json +378 -378
README.md CHANGED
@@ -1,13 +1,10 @@
 ---
 library_name: peft
 tags:
-- alignment-handbook
 - trl
 - dpo
 - generated_from_trainer
 base_model: NbAiLab/nb-gpt-j-6B-v2
-datasets:
-- hugodk-sch/aftonposten_title_prefs
 model-index:
 - name: aftonposten-6b-align-scan
   results: []
@@ -18,17 +15,17 @@ should probably proofread and complete it, then remove this comment. -->
 
 # aftonposten-6b-align-scan
 
-This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
+This model is a fine-tuned version of [NbAiLab/nb-gpt-j-6B-v2](https://huggingface.co/NbAiLab/nb-gpt-j-6B-v2) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 2.7902
-- Rewards/chosen: -0.0060
-- Rewards/rejected: -0.0171
-- Rewards/accuracies: 0.5602
-- Rewards/margins: 0.0111
-- Logps/rejected: -37.5735
-- Logps/chosen: -34.0545
-- Logits/rejected: -2.2247
-- Logits/chosen: -2.2295
+- Loss: 0.9891
+- Rewards/chosen: -0.0205
+- Rewards/rejected: -0.0313
+- Rewards/accuracies: 0.5183
+- Rewards/margins: 0.0108
+- Logps/rejected: -37.6211
+- Logps/chosen: -34.1029
+- Logits/rejected: -2.2202
+- Logits/chosen: -2.2250
 
 ## Model description
 
@@ -63,9 +60,9 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
 |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
-| 2.59 | 0.26 | 100 | 2.8958 | 0.0029 | 0.0052 | 0.4730 | -0.0024 | -37.4993 | -34.0250 | -2.2305 | -2.2353 |
-| 2.2795 | 0.52 | 200 | 2.8012 | -0.0060 | -0.0145 | 0.5278 | 0.0085 | -37.5651 | -34.0545 | -2.2290 | -2.2339 |
-| 1.7902 | 0.78 | 300 | 2.7585 | -0.0030 | -0.0167 | 0.5748 | 0.0137 | -37.5724 | -34.0446 | -2.2245 | -2.2294 |
+| 0.956 | 0.26 | 100 | 0.9948 | -0.0045 | -0.0098 | 0.5631 | 0.0053 | -37.5492 | -34.0495 | -2.2276 | -2.2325 |
+| 0.8922 | 0.52 | 200 | 0.9876 | -0.0094 | -0.0218 | 0.5303 | 0.0124 | -37.5894 | -34.0659 | -2.2231 | -2.2279 |
+| 0.8033 | 0.78 | 300 | 0.9891 | -0.0205 | -0.0313 | 0.5183 | 0.0108 | -37.6211 | -34.1029 | -2.2202 | -2.2250 |
 
 
 ### Framework versions
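The updated card describes a PEFT adapter trained with TRL's DPO trainer on top of NbAiLab/nb-gpt-j-6B-v2; in that setup the Rewards/* columns are DPO's implicit rewards (roughly beta times the policy-minus-reference log probability of each completion, with margins = chosen minus rejected). Below is a minimal, hedged sketch of how such an adapter is typically loaded for inference; the adapter repo id is an assumption inferred from the committer and model-index name and is not stated in this diff.

```python
# Hedged usage sketch (not part of this commit): load the DPO-tuned PEFT adapter
# on top of its base model. The adapter repo id below is assumed; adjust it to
# wherever the adapter is actually hosted.
import torch
from transformers import AutoTokenizer
from peft import AutoPeftModelForCausalLM

ADAPTER_ID = "hugodk-sch/aftonposten-6b-align-scan"  # assumed repo id
BASE_ID = "NbAiLab/nb-gpt-j-6B-v2"                   # base_model from the card

tokenizer = AutoTokenizer.from_pretrained(BASE_ID)
# AutoPeftModelForCausalLM reads adapter_config.json and pulls in the base model.
model = AutoPeftModelForCausalLM.from_pretrained(ADAPTER_ID, torch_dtype=torch.float16)

prompt = "Skriv en kort tittel til denne artikkelen:"  # illustrative prompt only
inputs = tokenizer(prompt, return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```

Since `AutoPeftModelForCausalLM` resolves the base model from the adapter's `adapter_config.json`, only the adapter repo id is needed at load time.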
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:997b36b01dd608f35abda62e68f900f81f4c27b6fbdb3117bd16ca385e56da53
+oid sha256:15df4126b86e5931a293f8a29058c83956e64751dc461efe761ddfc8ddf1affc
 size 176183216
all_results.json CHANGED
@@ -1,20 +1,7 @@
 {
     "epoch": 1.0,
-    "eval_logits/chosen": -2.2295007705688477,
-    "eval_logits/rejected": -2.2246556282043457,
-    "eval_logps/chosen": -34.054466247558594,
-    "eval_logps/rejected": -37.57353973388672,
-    "eval_loss": 2.790188789367676,
-    "eval_rewards/accuracies": 0.560215950012207,
-    "eval_rewards/chosen": -0.0059735761024057865,
-    "eval_rewards/margins": 0.011101600714027882,
-    "eval_rewards/rejected": -0.017075177282094955,
-    "eval_runtime": 145.7539,
-    "eval_samples": 343,
-    "eval_samples_per_second": 2.353,
-    "eval_steps_per_second": 0.295,
-    "train_loss": 2.3457992528940177,
-    "train_runtime": 3250.6949,
+    "train_loss": 0.9217340717067967,
+    "train_runtime": 3251.2729,
     "train_samples": 3079,
     "train_samples_per_second": 0.947,
     "train_steps_per_second": 0.118
train_results.json CHANGED
@@ -1,7 +1,7 @@
 {
     "epoch": 1.0,
-    "train_loss": 2.3457992528940177,
-    "train_runtime": 3250.6949,
+    "train_loss": 0.9217340717067967,
+    "train_runtime": 3251.2729,
     "train_samples": 3079,
     "train_samples_per_second": 0.947,
     "train_steps_per_second": 0.118
trainer_state.json CHANGED
@@ -15,7 +15,7 @@
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
- "loss": 2.7778,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
@@ -25,311 +25,311 @@
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
- "logits/chosen": -1.8667066097259521,
29
- "logits/rejected": -1.8710108995437622,
30
- "logps/chosen": -36.97679901123047,
31
- "logps/rejected": -33.63804626464844,
32
- "loss": 2.6754,
33
- "rewards/accuracies": 0.5138888955116272,
34
- "rewards/chosen": 0.008938448503613472,
35
- "rewards/margins": 0.011331514455378056,
36
- "rewards/rejected": -0.0023930652532726526,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
- "logits/chosen": -1.9984171390533447,
43
- "logits/rejected": -2.001072406768799,
44
- "logps/chosen": -29.64606285095215,
45
- "logps/rejected": -29.043380737304688,
46
- "loss": 2.8951,
47
- "rewards/accuracies": 0.4124999940395355,
48
- "rewards/chosen": -0.0011603410821408033,
49
- "rewards/margins": -0.007179437670856714,
50
- "rewards/rejected": 0.006019095424562693,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
- "logits/chosen": -1.9207251071929932,
57
- "logits/rejected": -1.918031096458435,
58
- "logps/chosen": -31.397014617919922,
59
- "logps/rejected": -33.21030044555664,
60
- "loss": 2.7749,
61
- "rewards/accuracies": 0.5874999761581421,
62
- "rewards/chosen": 0.0057237339206039906,
63
- "rewards/margins": 0.0032763103954494,
64
- "rewards/rejected": 0.0024474230594933033,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
- "logits/chosen": -2.017704486846924,
71
- "logits/rejected": -2.0089590549468994,
72
- "logps/chosen": -32.57511520385742,
73
- "logps/rejected": -32.51970672607422,
74
- "loss": 2.7984,
75
- "rewards/accuracies": 0.5,
76
- "rewards/chosen": 0.0005076262168586254,
77
- "rewards/margins": 0.002619259525090456,
78
- "rewards/rejected": -0.0021116335410624743,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
- "logits/chosen": -1.8626596927642822,
85
- "logits/rejected": -1.8518798351287842,
86
- "logps/chosen": -33.531227111816406,
87
- "logps/rejected": -35.452476501464844,
88
- "loss": 2.7501,
89
- "rewards/accuracies": 0.48750001192092896,
90
- "rewards/chosen": 0.007876711897552013,
91
- "rewards/margins": 0.00791595596820116,
92
- "rewards/rejected": -3.924337215721607e-05,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
- "logits/chosen": -1.9410473108291626,
99
- "logits/rejected": -1.9429900646209717,
100
- "logps/chosen": -32.554359436035156,
101
- "logps/rejected": -33.228172302246094,
102
- "loss": 2.6127,
103
- "rewards/accuracies": 0.6000000238418579,
104
- "rewards/chosen": 0.01374770700931549,
105
- "rewards/margins": 0.03078722581267357,
106
- "rewards/rejected": -0.017039518803358078,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
- "logits/chosen": -2.072329044342041,
113
- "logits/rejected": -2.0772993564605713,
114
- "logps/chosen": -33.98577880859375,
115
- "logps/rejected": -36.6306266784668,
116
- "loss": 2.7121,
117
- "rewards/accuracies": 0.574999988079071,
118
- "rewards/chosen": 0.0008118894184008241,
119
- "rewards/margins": 0.0193378534168005,
120
- "rewards/rejected": -0.01852596551179886,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
- "logits/chosen": -1.9340860843658447,
127
- "logits/rejected": -1.937217354774475,
128
- "logps/chosen": -34.292659759521484,
129
- "logps/rejected": -34.65100860595703,
130
- "loss": 2.3666,
131
- "rewards/accuracies": 0.625,
132
- "rewards/chosen": 0.040544699877500534,
133
- "rewards/margins": 0.05812396854162216,
134
- "rewards/rejected": -0.017579272389411926,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
- "logits/chosen": -1.9430879354476929,
141
- "logits/rejected": -1.9476264715194702,
142
- "logps/chosen": -32.375579833984375,
143
- "logps/rejected": -32.32474899291992,
144
- "loss": 2.7344,
145
  "rewards/accuracies": 0.6625000238418579,
146
- "rewards/chosen": 0.02755170688033104,
147
- "rewards/margins": 0.017304658889770508,
148
- "rewards/rejected": 0.010247047990560532,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
- "logits/chosen": -2.0406196117401123,
155
- "logits/rejected": -2.0386359691619873,
156
- "logps/chosen": -32.16400146484375,
157
- "logps/rejected": -31.278045654296875,
158
- "loss": 2.59,
159
- "rewards/accuracies": 0.612500011920929,
160
- "rewards/chosen": 0.023049456998705864,
161
- "rewards/margins": 0.02659946121275425,
162
- "rewards/rejected": -0.0035500028170645237,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
- "eval_logits/chosen": -2.2353110313415527,
168
- "eval_logits/rejected": -2.2304632663726807,
169
- "eval_logps/chosen": -34.025028228759766,
170
- "eval_logps/rejected": -37.4992561340332,
171
- "eval_loss": 2.8957631587982178,
172
- "eval_rewards/accuracies": 0.4730066657066345,
173
- "eval_rewards/chosen": 0.0028568038251250982,
174
- "eval_rewards/margins": -0.0023524421267211437,
175
- "eval_rewards/rejected": 0.005209244787693024,
176
- "eval_runtime": 146.1162,
177
- "eval_samples_per_second": 2.347,
178
- "eval_steps_per_second": 0.294,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
- "logits/chosen": -1.9961220026016235,
185
- "logits/rejected": -1.993739128112793,
186
- "logps/chosen": -33.09939193725586,
187
- "logps/rejected": -34.00792694091797,
188
- "loss": 2.7463,
189
- "rewards/accuracies": 0.637499988079071,
190
- "rewards/chosen": 0.043326906859874725,
191
- "rewards/margins": 0.036895059049129486,
192
- "rewards/rejected": 0.006431845016777515,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
- "logits/chosen": -2.0079345703125,
199
- "logits/rejected": -1.999603509902954,
200
- "logps/chosen": -32.316917419433594,
201
- "logps/rejected": -32.12470245361328,
202
- "loss": 2.6293,
203
- "rewards/accuracies": 0.574999988079071,
204
- "rewards/chosen": 0.038397181779146194,
205
- "rewards/margins": 0.027356009930372238,
206
- "rewards/rejected": 0.011041165329515934,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
- "logits/chosen": -2.0357651710510254,
213
- "logits/rejected": -2.0277907848358154,
214
- "logps/chosen": -30.32888412475586,
215
- "logps/rejected": -32.07011413574219,
216
- "loss": 2.5267,
217
- "rewards/accuracies": 0.5874999761581421,
218
- "rewards/chosen": 0.04404935985803604,
219
- "rewards/margins": 0.049181826412677765,
220
- "rewards/rejected": -0.005132470745593309,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
- "logits/chosen": -1.9659456014633179,
227
- "logits/rejected": -1.9761817455291748,
228
- "logps/chosen": -31.22298240661621,
229
- "logps/rejected": -32.57875442504883,
230
- "loss": 2.3006,
231
- "rewards/accuracies": 0.637499988079071,
232
- "rewards/chosen": 0.05900341272354126,
233
- "rewards/margins": 0.0721658319234848,
234
- "rewards/rejected": -0.013162411749362946,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
- "logits/chosen": -1.877907156944275,
241
- "logits/rejected": -1.879045844078064,
242
- "logps/chosen": -33.954994201660156,
243
- "logps/rejected": -34.817832946777344,
244
- "loss": 2.2877,
245
- "rewards/accuracies": 0.637499988079071,
246
- "rewards/chosen": 0.07130923122167587,
247
- "rewards/margins": 0.08948297053575516,
248
- "rewards/rejected": -0.018173744902014732,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
- "logits/chosen": -1.9301284551620483,
255
- "logits/rejected": -1.9266859292984009,
256
- "logps/chosen": -36.037498474121094,
257
- "logps/rejected": -32.7092170715332,
258
- "loss": 2.5282,
259
- "rewards/accuracies": 0.6000000238418579,
260
- "rewards/chosen": 0.04025455564260483,
261
- "rewards/margins": 0.036193959414958954,
262
- "rewards/rejected": 0.004060596693307161,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
- "logits/chosen": -2.03144907951355,
269
- "logits/rejected": -2.0240814685821533,
270
- "logps/chosen": -33.5056037902832,
271
- "logps/rejected": -31.39798355102539,
272
- "loss": 2.1268,
273
- "rewards/accuracies": 0.637499988079071,
274
- "rewards/chosen": 0.08767756074666977,
275
- "rewards/margins": 0.10151807963848114,
276
- "rewards/rejected": -0.01384051889181137,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
- "logits/chosen": -2.037555694580078,
283
- "logits/rejected": -2.0428080558776855,
284
- "logps/chosen": -32.268028259277344,
285
- "logps/rejected": -32.48134231567383,
286
- "loss": 2.1694,
287
- "rewards/accuracies": 0.637499988079071,
288
- "rewards/chosen": 0.08302746713161469,
289
- "rewards/margins": 0.08188783377408981,
290
- "rewards/rejected": 0.0011396423215046525,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
- "logits/chosen": -2.0385537147521973,
297
- "logits/rejected": -2.0357697010040283,
298
- "logps/chosen": -31.26275062561035,
299
- "logps/rejected": -31.31881332397461,
300
- "loss": 2.411,
301
- "rewards/accuracies": 0.699999988079071,
302
- "rewards/chosen": 0.0678616464138031,
303
- "rewards/margins": 0.06804122775793076,
304
- "rewards/rejected": -0.00017957761883735657,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
- "logits/chosen": -1.9085556268692017,
311
- "logits/rejected": -1.9132124185562134,
312
- "logps/chosen": -31.326763153076172,
313
- "logps/rejected": -32.80078887939453,
314
- "loss": 2.2795,
315
- "rewards/accuracies": 0.6499999761581421,
316
- "rewards/chosen": 0.0838552787899971,
317
- "rewards/margins": 0.08681019395589828,
318
- "rewards/rejected": -0.002954920055344701,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
- "eval_logits/chosen": -2.2338554859161377,
324
- "eval_logits/rejected": -2.2290101051330566,
325
- "eval_logps/chosen": -34.05450439453125,
326
- "eval_logps/rejected": -37.565059661865234,
327
- "eval_loss": 2.801168918609619,
328
- "eval_rewards/accuracies": 0.5278239250183105,
329
- "eval_rewards/chosen": -0.005985844414681196,
330
- "eval_rewards/margins": 0.0085463160648942,
331
- "eval_rewards/rejected": -0.014532160945236683,
332
- "eval_runtime": 145.8316,
333
  "eval_samples_per_second": 2.352,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
@@ -337,277 +337,277 @@
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
- "logits/chosen": -2.020181179046631,
341
- "logits/rejected": -2.030860424041748,
342
- "logps/chosen": -31.76279067993164,
343
- "logps/rejected": -33.952571868896484,
344
- "loss": 2.1351,
345
- "rewards/accuracies": 0.6625000238418579,
346
- "rewards/chosen": 0.06255346536636353,
347
- "rewards/margins": 0.08951519429683685,
348
- "rewards/rejected": -0.026961728930473328,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
- "logits/chosen": -1.912184476852417,
355
- "logits/rejected": -1.926975965499878,
356
- "logps/chosen": -29.868118286132812,
357
- "logps/rejected": -31.57167625427246,
358
- "loss": 2.231,
359
- "rewards/accuracies": 0.6875,
360
- "rewards/chosen": 0.07302670180797577,
361
- "rewards/margins": 0.07707642018795013,
362
- "rewards/rejected": -0.004049715120345354,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
- "logits/chosen": -1.968927025794983,
369
- "logits/rejected": -1.972905158996582,
370
- "logps/chosen": -33.126705169677734,
371
- "logps/rejected": -31.619220733642578,
372
- "loss": 2.0957,
373
- "rewards/accuracies": 0.7124999761581421,
374
- "rewards/chosen": 0.0872274860739708,
375
- "rewards/margins": 0.10820253938436508,
376
- "rewards/rejected": -0.02097504958510399,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
- "logits/chosen": -1.967507004737854,
383
- "logits/rejected": -1.9456119537353516,
384
- "logps/chosen": -33.865623474121094,
385
- "logps/rejected": -35.0605583190918,
386
- "loss": 1.9762,
387
- "rewards/accuracies": 0.762499988079071,
388
- "rewards/chosen": 0.08643803745508194,
389
- "rewards/margins": 0.11936646699905396,
390
- "rewards/rejected": -0.03292842581868172,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
- "logits/chosen": -2.008720874786377,
397
- "logits/rejected": -2.0053982734680176,
398
- "logps/chosen": -32.73490524291992,
399
- "logps/rejected": -36.2797966003418,
400
- "loss": 2.2614,
401
- "rewards/accuracies": 0.675000011920929,
402
- "rewards/chosen": 0.05704798549413681,
403
- "rewards/margins": 0.07351039350032806,
404
- "rewards/rejected": -0.016462400555610657,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
- "logits/chosen": -1.8761491775512695,
411
- "logits/rejected": -1.8737146854400635,
412
- "logps/chosen": -33.989051818847656,
413
- "logps/rejected": -35.516883850097656,
414
- "loss": 2.4128,
415
- "rewards/accuracies": 0.6875,
416
- "rewards/chosen": 0.05980368331074715,
417
- "rewards/margins": 0.06348178535699844,
418
- "rewards/rejected": -0.003678102744743228,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
- "logits/chosen": -1.8607820272445679,
425
- "logits/rejected": -1.8583341836929321,
426
- "logps/chosen": -34.1697998046875,
427
- "logps/rejected": -31.77322769165039,
428
- "loss": 2.33,
429
- "rewards/accuracies": 0.699999988079071,
430
- "rewards/chosen": 0.06833983957767487,
431
- "rewards/margins": 0.06890784204006195,
432
- "rewards/rejected": -0.0005679976311512291,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
- "logits/chosen": -1.9649995565414429,
439
- "logits/rejected": -1.954466462135315,
440
- "logps/chosen": -35.01842498779297,
441
- "logps/rejected": -31.856042861938477,
442
- "loss": 2.0235,
443
- "rewards/accuracies": 0.7124999761581421,
444
- "rewards/chosen": 0.10146405547857285,
445
- "rewards/margins": 0.10597936809062958,
446
- "rewards/rejected": -0.004515302833169699,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
- "logits/chosen": -2.059657096862793,
453
- "logits/rejected": -2.0446982383728027,
454
- "logps/chosen": -30.713714599609375,
455
- "logps/rejected": -32.61918258666992,
456
- "loss": 2.5851,
457
- "rewards/accuracies": 0.612500011920929,
458
- "rewards/chosen": 0.06312306225299835,
459
- "rewards/margins": 0.0533272810280323,
460
- "rewards/rejected": 0.00979578960686922,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
- "logits/chosen": -1.9307676553726196,
467
- "logits/rejected": -1.9282405376434326,
468
- "logps/chosen": -32.467708587646484,
469
- "logps/rejected": -30.857818603515625,
470
- "loss": 1.7902,
471
- "rewards/accuracies": 0.737500011920929,
472
- "rewards/chosen": 0.1378202736377716,
473
- "rewards/margins": 0.15012334287166595,
474
- "rewards/rejected": -0.012303064577281475,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
- "eval_logits/chosen": -2.2293622493743896,
480
- "eval_logits/rejected": -2.2245147228240967,
481
- "eval_logps/chosen": -34.04458236694336,
482
- "eval_logps/rejected": -37.57244873046875,
483
- "eval_loss": 2.758474826812744,
484
- "eval_rewards/accuracies": 0.5747508406639099,
485
- "eval_rewards/chosen": -0.003010095562785864,
486
- "eval_rewards/margins": 0.013738218694925308,
487
- "eval_rewards/rejected": -0.01674831472337246,
488
- "eval_runtime": 145.5146,
489
- "eval_samples_per_second": 2.357,
490
- "eval_steps_per_second": 0.296,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
- "logits/chosen": -1.9147694110870361,
497
- "logits/rejected": -1.9114938974380493,
498
- "logps/chosen": -31.310047149658203,
499
- "logps/rejected": -33.77363967895508,
500
- "loss": 2.1768,
501
- "rewards/accuracies": 0.75,
502
- "rewards/chosen": 0.08799003809690475,
503
- "rewards/margins": 0.09743582457304001,
504
- "rewards/rejected": -0.009445784613490105,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
- "logits/chosen": -1.965264081954956,
511
- "logits/rejected": -1.953029990196228,
512
- "logps/chosen": -34.34061050415039,
513
- "logps/rejected": -33.643863677978516,
514
- "loss": 1.9927,
515
- "rewards/accuracies": 0.7124999761581421,
516
- "rewards/chosen": 0.07663901150226593,
517
- "rewards/margins": 0.10814561694860458,
518
- "rewards/rejected": -0.03150660917162895,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
- "logits/chosen": -1.9999011754989624,
525
- "logits/rejected": -1.9984591007232666,
526
- "logps/chosen": -33.24303436279297,
527
- "logps/rejected": -32.54120635986328,
528
- "loss": 2.1065,
529
- "rewards/accuracies": 0.6875,
530
- "rewards/chosen": 0.0728277713060379,
531
- "rewards/margins": 0.09131507575511932,
532
- "rewards/rejected": -0.01848730817437172,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
- "logits/chosen": -2.086862802505493,
539
- "logits/rejected": -2.0711212158203125,
540
- "logps/chosen": -33.77810287475586,
541
- "logps/rejected": -33.093299865722656,
542
- "loss": 2.0709,
543
- "rewards/accuracies": 0.75,
544
- "rewards/chosen": 0.12038693577051163,
545
- "rewards/margins": 0.1129666194319725,
546
- "rewards/rejected": 0.007420300040394068,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
- "logits/chosen": -1.9597351551055908,
553
- "logits/rejected": -1.9588581323623657,
554
- "logps/chosen": -32.87625503540039,
555
- "logps/rejected": -32.52130889892578,
556
- "loss": 1.9515,
557
- "rewards/accuracies": 0.800000011920929,
558
- "rewards/chosen": 0.12175308167934418,
559
- "rewards/margins": 0.13860100507736206,
560
- "rewards/rejected": -0.016847927123308182,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
- "logits/chosen": -1.9148937463760376,
567
- "logits/rejected": -1.9252065420150757,
568
- "logps/chosen": -31.9158935546875,
569
- "logps/rejected": -35.31604766845703,
570
- "loss": 2.1343,
571
- "rewards/accuracies": 0.7250000238418579,
572
- "rewards/chosen": 0.08664007484912872,
573
- "rewards/margins": 0.09482574462890625,
574
- "rewards/rejected": -0.008185659535229206,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
- "logits/chosen": -2.053968906402588,
581
- "logits/rejected": -2.047475576400757,
582
- "logps/chosen": -33.36884307861328,
583
- "logps/rejected": -29.23018455505371,
584
- "loss": 2.1339,
585
- "rewards/accuracies": 0.762499988079071,
586
- "rewards/chosen": 0.08493683487176895,
587
- "rewards/margins": 0.08606470376253128,
588
- "rewards/rejected": -0.0011278685415163636,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
- "logits/chosen": -1.9135916233062744,
595
- "logits/rejected": -1.9158084392547607,
596
- "logps/chosen": -33.879058837890625,
597
- "logps/rejected": -30.930347442626953,
598
- "loss": 1.9191,
599
- "rewards/accuracies": 0.7749999761581421,
600
- "rewards/chosen": 0.10799793899059296,
601
- "rewards/margins": 0.12531307339668274,
602
- "rewards/rejected": -0.017315123230218887,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
- "train_loss": 2.3457992528940177,
610
- "train_runtime": 3250.6949,
611
  "train_samples_per_second": 0.947,
612
  "train_steps_per_second": 0.118
613
  }
 
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
+ "loss": 1.0,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
 
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
+ "logits/chosen": -1.866829752922058,
29
+ "logits/rejected": -1.8711390495300293,
30
+ "logps/chosen": -36.97040557861328,
31
+ "logps/rejected": -33.66280746459961,
32
+ "loss": 0.9793,
33
+ "rewards/accuracies": 0.5972222089767456,
34
+ "rewards/chosen": 0.010857796296477318,
35
+ "rewards/margins": 0.020676644518971443,
36
+ "rewards/rejected": -0.009818848222494125,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
+ "logits/chosen": -1.9976301193237305,
43
+ "logits/rejected": -2.0002708435058594,
44
+ "logps/chosen": -29.64252281188965,
45
+ "logps/rejected": -29.05857276916504,
46
+ "loss": 1.0016,
47
+ "rewards/accuracies": 0.4749999940395355,
48
+ "rewards/chosen": -9.895558468997478e-05,
49
+ "rewards/margins": -0.0015598980244249105,
50
+ "rewards/rejected": 0.0014609418576583266,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
+ "logits/chosen": -1.9210872650146484,
57
+ "logits/rejected": -1.918402910232544,
58
+ "logps/chosen": -31.397838592529297,
59
+ "logps/rejected": -33.22719192504883,
60
+ "loss": 0.9919,
61
+ "rewards/accuracies": 0.612500011920929,
62
+ "rewards/chosen": 0.005476226564496756,
63
+ "rewards/margins": 0.00809534452855587,
64
+ "rewards/rejected": -0.0026191179640591145,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
+ "logits/chosen": -2.017606258392334,
71
+ "logits/rejected": -2.0088634490966797,
72
+ "logps/chosen": -32.566612243652344,
73
+ "logps/rejected": -32.52539825439453,
74
+ "loss": 0.9931,
75
+ "rewards/accuracies": 0.5375000238418579,
76
+ "rewards/chosen": 0.0030602235347032547,
77
+ "rewards/margins": 0.0068772160448133945,
78
+ "rewards/rejected": -0.0038169927429407835,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
+ "logits/chosen": -1.8625805377960205,
85
+ "logits/rejected": -1.8518139123916626,
86
+ "logps/chosen": -33.56818771362305,
87
+ "logps/rejected": -35.4713020324707,
88
+ "loss": 0.9975,
89
+ "rewards/accuracies": 0.550000011920929,
90
+ "rewards/chosen": -0.0032131080515682697,
91
+ "rewards/margins": 0.0024742281530052423,
92
+ "rewards/rejected": -0.005687335971742868,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
+ "logits/chosen": -1.9413166046142578,
99
+ "logits/rejected": -1.9432499408721924,
100
+ "logps/chosen": -32.55731964111328,
101
+ "logps/rejected": -33.21186065673828,
102
+ "loss": 0.975,
103
+ "rewards/accuracies": 0.5249999761581421,
104
+ "rewards/chosen": 0.012859411537647247,
105
+ "rewards/margins": 0.02500327304005623,
106
+ "rewards/rejected": -0.01214386522769928,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
+ "logits/chosen": -2.0717270374298096,
113
+ "logits/rejected": -2.0766983032226562,
114
+ "logps/chosen": -34.008384704589844,
115
+ "logps/rejected": -36.6549186706543,
116
+ "loss": 0.9802,
117
+ "rewards/accuracies": 0.6000000238418579,
118
+ "rewards/chosen": -0.0059700957499444485,
119
+ "rewards/margins": 0.01984369568526745,
120
+ "rewards/rejected": -0.025813793763518333,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
+ "logits/chosen": -1.932422399520874,
127
+ "logits/rejected": -1.9355411529541016,
128
+ "logps/chosen": -34.327762603759766,
129
+ "logps/rejected": -34.66461944580078,
130
+ "loss": 0.9483,
131
+ "rewards/accuracies": 0.5625,
132
+ "rewards/chosen": 0.03001437522470951,
133
+ "rewards/margins": 0.05167943984270096,
134
+ "rewards/rejected": -0.02166506089270115,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
+ "logits/chosen": -1.9404243230819702,
141
+ "logits/rejected": -1.944933533668518,
142
+ "logps/chosen": -32.38780975341797,
143
+ "logps/rejected": -32.35973358154297,
144
+ "loss": 0.9759,
145
  "rewards/accuracies": 0.6625000238418579,
146
+ "rewards/chosen": 0.02388269081711769,
147
+ "rewards/margins": 0.024130593985319138,
148
+ "rewards/rejected": -0.00024790168390609324,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
+ "logits/chosen": -2.0375094413757324,
155
+ "logits/rejected": -2.0355331897735596,
156
+ "logps/chosen": -32.14101028442383,
157
+ "logps/rejected": -31.31320571899414,
158
+ "loss": 0.956,
159
+ "rewards/accuracies": 0.6000000238418579,
160
+ "rewards/chosen": 0.029947593808174133,
161
+ "rewards/margins": 0.04404589533805847,
162
+ "rewards/rejected": -0.014098303392529488,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
+ "eval_logits/chosen": -2.232485294342041,
168
+ "eval_logits/rejected": -2.227635622024536,
169
+ "eval_logps/chosen": -34.049468994140625,
170
+ "eval_logps/rejected": -37.54916000366211,
171
+ "eval_loss": 0.994844913482666,
172
+ "eval_rewards/accuracies": 0.5631229281425476,
173
+ "eval_rewards/chosen": -0.004474184010177851,
174
+ "eval_rewards/margins": 0.0052884831093251705,
175
+ "eval_rewards/rejected": -0.009762667119503021,
176
+ "eval_runtime": 146.0056,
177
+ "eval_samples_per_second": 2.349,
178
+ "eval_steps_per_second": 0.295,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
+ "logits/chosen": -1.9923181533813477,
185
+ "logits/rejected": -1.989919662475586,
186
+ "logps/chosen": -33.152462005615234,
187
+ "logps/rejected": -34.039642333984375,
188
+ "loss": 0.9695,
189
+ "rewards/accuracies": 0.6499999761581421,
190
+ "rewards/chosen": 0.027407139539718628,
191
+ "rewards/margins": 0.03048800863325596,
192
+ "rewards/rejected": -0.003080862807109952,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
+ "logits/chosen": -2.0034618377685547,
199
+ "logits/rejected": -1.9951326847076416,
200
+ "logps/chosen": -32.344200134277344,
201
+ "logps/rejected": -32.13230895996094,
202
+ "loss": 0.9785,
203
+ "rewards/accuracies": 0.512499988079071,
204
+ "rewards/chosen": 0.03021082654595375,
205
+ "rewards/margins": 0.02145235612988472,
206
+ "rewards/rejected": 0.008758468553423882,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
+ "logits/chosen": -2.031149387359619,
213
+ "logits/rejected": -2.0231785774230957,
214
+ "logps/chosen": -30.327457427978516,
215
+ "logps/rejected": -32.072906494140625,
216
+ "loss": 0.9496,
217
+ "rewards/accuracies": 0.5375000238418579,
218
+ "rewards/chosen": 0.044478606432676315,
219
+ "rewards/margins": 0.050448428839445114,
220
+ "rewards/rejected": -0.005969815421849489,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
+ "logits/chosen": -1.961639642715454,
227
+ "logits/rejected": -1.9718501567840576,
228
+ "logps/chosen": -31.234424591064453,
229
+ "logps/rejected": -32.57652282714844,
230
+ "loss": 0.9319,
231
+ "rewards/accuracies": 0.6499999761581421,
232
+ "rewards/chosen": 0.05557093024253845,
233
+ "rewards/margins": 0.06806287914514542,
234
+ "rewards/rejected": -0.012491944245994091,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
+ "logits/chosen": -1.8733562231063843,
241
+ "logits/rejected": -1.8745276927947998,
242
+ "logps/chosen": -33.9085578918457,
243
+ "logps/rejected": -34.8173828125,
244
+ "loss": 0.8973,
245
+ "rewards/accuracies": 0.5874999761581421,
246
+ "rewards/chosen": 0.08524172008037567,
247
+ "rewards/margins": 0.10328052937984467,
248
+ "rewards/rejected": -0.018038809299468994,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
+ "logits/chosen": -1.9239585399627686,
255
+ "logits/rejected": -1.9205586910247803,
256
+ "logps/chosen": -35.99612045288086,
257
+ "logps/rejected": -32.70136260986328,
258
+ "loss": 0.9538,
259
+ "rewards/accuracies": 0.6499999761581421,
260
+ "rewards/chosen": 0.052666354924440384,
261
+ "rewards/margins": 0.04624979570508003,
262
+ "rewards/rejected": 0.006416561547666788,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
+ "logits/chosen": -2.02461576461792,
269
+ "logits/rejected": -2.017319440841675,
270
+ "logps/chosen": -33.47701644897461,
271
+ "logps/rejected": -31.457340240478516,
272
+ "loss": 0.8721,
273
+ "rewards/accuracies": 0.7749999761581421,
274
+ "rewards/chosen": 0.09625236690044403,
275
+ "rewards/margins": 0.12790000438690186,
276
+ "rewards/rejected": -0.031647637486457825,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
+ "logits/chosen": -2.030789613723755,
283
+ "logits/rejected": -2.0360310077667236,
284
+ "logps/chosen": -32.23139572143555,
285
+ "logps/rejected": -32.44371032714844,
286
+ "loss": 0.9184,
287
+ "rewards/accuracies": 0.6625000238418579,
288
+ "rewards/chosen": 0.0940173864364624,
289
+ "rewards/margins": 0.0815875232219696,
290
+ "rewards/rejected": 0.012429863214492798,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
+ "logits/chosen": -2.0321707725524902,
297
+ "logits/rejected": -2.0294008255004883,
298
+ "logps/chosen": -31.273319244384766,
299
+ "logps/rejected": -31.368595123291016,
300
+ "loss": 0.9202,
301
+ "rewards/accuracies": 0.6625000238418579,
302
+ "rewards/chosen": 0.06469077616930008,
303
+ "rewards/margins": 0.0798054188489914,
304
+ "rewards/rejected": -0.015114650130271912,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
+ "logits/chosen": -1.9020545482635498,
311
+ "logits/rejected": -1.9066784381866455,
312
+ "logps/chosen": -31.315628051757812,
313
+ "logps/rejected": -32.859519958496094,
314
+ "loss": 0.8922,
315
+ "rewards/accuracies": 0.6875,
316
+ "rewards/chosen": 0.0871955007314682,
317
+ "rewards/margins": 0.1077716127038002,
318
+ "rewards/rejected": -0.02057611010968685,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
+ "eval_logits/chosen": -2.2279350757598877,
324
+ "eval_logits/rejected": -2.2231225967407227,
325
+ "eval_logps/chosen": -34.06590270996094,
326
+ "eval_logps/rejected": -37.589378356933594,
327
+ "eval_loss": 0.9875569939613342,
328
+ "eval_rewards/accuracies": 0.530315637588501,
329
+ "eval_rewards/chosen": -0.009404394775629044,
330
+ "eval_rewards/margins": 0.012423668056726456,
331
+ "eval_rewards/rejected": -0.0218280628323555,
332
+ "eval_runtime": 145.8494,
333
  "eval_samples_per_second": 2.352,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
 
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
+ "logits/chosen": -2.0143890380859375,
341
+ "logits/rejected": -2.0250439643859863,
342
+ "logps/chosen": -31.765193939208984,
343
+ "logps/rejected": -33.9805908203125,
344
+ "loss": 0.9046,
345
+ "rewards/accuracies": 0.6499999761581421,
346
+ "rewards/chosen": 0.061833299696445465,
347
+ "rewards/margins": 0.09720136225223541,
348
+ "rewards/rejected": -0.03536807745695114,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
+ "logits/chosen": -1.907122254371643,
355
+ "logits/rejected": -1.9218772649765015,
356
+ "logps/chosen": -29.810348510742188,
357
+ "logps/rejected": -31.652551651000977,
358
+ "loss": 0.8813,
359
+ "rewards/accuracies": 0.75,
360
+ "rewards/chosen": 0.0903574600815773,
361
+ "rewards/margins": 0.11866964399814606,
362
+ "rewards/rejected": -0.028312185779213905,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
+ "logits/chosen": -1.9638553857803345,
369
+ "logits/rejected": -1.9678163528442383,
370
+ "logps/chosen": -33.116092681884766,
371
+ "logps/rejected": -31.662506103515625,
372
+ "loss": 0.8826,
373
+ "rewards/accuracies": 0.637499988079071,
374
+ "rewards/chosen": 0.09041241556406021,
375
+ "rewards/margins": 0.1243731826543808,
376
+ "rewards/rejected": -0.03396075591444969,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
+ "logits/chosen": -1.9616014957427979,
383
+ "logits/rejected": -1.9398181438446045,
384
+ "logps/chosen": -33.844024658203125,
385
+ "logps/rejected": -35.163543701171875,
386
+ "loss": 0.8473,
387
+ "rewards/accuracies": 0.7250000238418579,
388
+ "rewards/chosen": 0.09291732311248779,
389
+ "rewards/margins": 0.15674278140068054,
390
+ "rewards/rejected": -0.06382545083761215,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
+ "logits/chosen": -2.002652883529663,
397
+ "logits/rejected": -1.9993493556976318,
398
+ "logps/chosen": -32.73945617675781,
399
+ "logps/rejected": -36.27147674560547,
400
+ "loss": 0.9305,
401
+ "rewards/accuracies": 0.574999988079071,
402
+ "rewards/chosen": 0.055684663355350494,
403
+ "rewards/margins": 0.0696500912308693,
404
+ "rewards/rejected": -0.013965430669486523,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
+ "logits/chosen": -1.869681715965271,
411
+ "logits/rejected": -1.8672786951065063,
412
+ "logps/chosen": -34.0054931640625,
413
+ "logps/rejected": -35.55742645263672,
414
+ "loss": 0.9293,
415
+ "rewards/accuracies": 0.6625000238418579,
416
+ "rewards/chosen": 0.05487124249339104,
417
+ "rewards/margins": 0.07071295380592346,
418
+ "rewards/rejected": -0.015841711312532425,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
+ "logits/chosen": -1.8544126749038696,
425
+ "logits/rejected": -1.852034330368042,
426
+ "logps/chosen": -34.22079086303711,
427
+ "logps/rejected": -31.864788055419922,
428
+ "loss": 0.9205,
429
+ "rewards/accuracies": 0.6625000238418579,
430
+ "rewards/chosen": 0.0530419759452343,
431
+ "rewards/margins": 0.08107996731996536,
432
+ "rewards/rejected": -0.028037995100021362,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
+ "logits/chosen": -1.957960844039917,
439
+ "logits/rejected": -1.9474948644638062,
440
+ "logps/chosen": -35.02719497680664,
441
+ "logps/rejected": -31.89957046508789,
442
+ "loss": 0.8836,
443
+ "rewards/accuracies": 0.6625000238418579,
444
+ "rewards/chosen": 0.09883318096399307,
445
+ "rewards/margins": 0.11640648543834686,
446
+ "rewards/rejected": -0.017573300749063492,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
+ "logits/chosen": -2.0539374351501465,
453
+ "logits/rejected": -2.0390708446502686,
454
+ "logps/chosen": -30.736724853515625,
455
+ "logps/rejected": -32.64698791503906,
456
+ "loss": 0.9452,
457
+ "rewards/accuracies": 0.574999988079071,
458
+ "rewards/chosen": 0.05622049421072006,
459
+ "rewards/margins": 0.05476574972271919,
460
+ "rewards/rejected": 0.001454742974601686,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
+ "logits/chosen": -1.9243535995483398,
467
+ "logits/rejected": -1.9218591451644897,
468
+ "logps/chosen": -32.35340118408203,
469
+ "logps/rejected": -30.938573837280273,
470
+ "loss": 0.8033,
471
+ "rewards/accuracies": 0.75,
472
+ "rewards/chosen": 0.17211246490478516,
473
+ "rewards/margins": 0.20864196121692657,
474
+ "rewards/rejected": -0.036529459059238434,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
+ "eval_logits/chosen": -2.224973440170288,
480
+ "eval_logits/rejected": -2.2201507091522217,
481
+ "eval_logps/chosen": -34.1029052734375,
482
+ "eval_logps/rejected": -37.6210823059082,
483
+ "eval_loss": 0.989064633846283,
484
+ "eval_rewards/accuracies": 0.5182723999023438,
485
+ "eval_rewards/chosen": -0.020507192239165306,
486
+ "eval_rewards/margins": 0.010832280851900578,
487
+ "eval_rewards/rejected": -0.03133947402238846,
488
+ "eval_runtime": 145.7799,
489
+ "eval_samples_per_second": 2.353,
490
+ "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
+ "logits/chosen": -1.910521149635315,
497
+ "logits/rejected": -1.9072707891464233,
498
+ "logps/chosen": -31.341650009155273,
499
+ "logps/rejected": -33.842002868652344,
500
+ "loss": 0.9015,
501
+ "rewards/accuracies": 0.737500011920929,
502
+ "rewards/chosen": 0.07851080596446991,
503
+ "rewards/margins": 0.10846547037363052,
504
+ "rewards/rejected": -0.02995465137064457,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
+ "logits/chosen": -1.9592889547348022,
511
+ "logits/rejected": -1.9470914602279663,
512
+ "logps/chosen": -34.280879974365234,
513
+ "logps/rejected": -33.69014358520508,
514
+ "loss": 0.8601,
515
+ "rewards/accuracies": 0.6625000238418579,
516
+ "rewards/chosen": 0.09455753862857819,
517
+ "rewards/margins": 0.1399478018283844,
518
+ "rewards/rejected": -0.04539026692509651,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
+ "logits/chosen": -1.9943225383758545,
525
+ "logits/rejected": -1.992897629737854,
526
+ "logps/chosen": -33.13710403442383,
527
+ "logps/rejected": -32.56306838989258,
528
+ "loss": 0.8752,
529
+ "rewards/accuracies": 0.699999988079071,
530
+ "rewards/chosen": 0.10460783541202545,
531
+ "rewards/margins": 0.1296529769897461,
532
+ "rewards/rejected": -0.025045130401849747,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
+ "logits/chosen": -2.081005096435547,
539
+ "logits/rejected": -2.0653510093688965,
540
+ "logps/chosen": -33.79452896118164,
541
+ "logps/rejected": -33.11925506591797,
542
+ "loss": 0.8842,
543
+ "rewards/accuracies": 0.699999988079071,
544
+ "rewards/chosen": 0.1154591292142868,
545
+ "rewards/margins": 0.11582595109939575,
546
+ "rewards/rejected": -0.00036681219353340566,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
+ "logits/chosen": -1.9534845352172852,
553
+ "logits/rejected": -1.9526736736297607,
554
+ "logps/chosen": -32.83572006225586,
555
+ "logps/rejected": -32.52921676635742,
556
+ "loss": 0.8567,
557
+ "rewards/accuracies": 0.699999988079071,
558
+ "rewards/chosen": 0.1339126080274582,
559
+ "rewards/margins": 0.15313318371772766,
560
+ "rewards/rejected": -0.01922057382762432,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
+ "logits/chosen": -1.9090850353240967,
567
+ "logits/rejected": -1.919353723526001,
568
+ "logps/chosen": -31.896703720092773,
569
+ "logps/rejected": -35.348304748535156,
570
+ "loss": 0.8898,
571
+ "rewards/accuracies": 0.675000011920929,
572
+ "rewards/chosen": 0.09239637851715088,
573
+ "rewards/margins": 0.11026018857955933,
574
+ "rewards/rejected": -0.017863804474473,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
+ "logits/chosen": -2.0488381385803223,
581
+ "logits/rejected": -2.0423855781555176,
582
+ "logps/chosen": -33.353912353515625,
583
+ "logps/rejected": -29.256216049194336,
584
+ "loss": 0.9016,
585
+ "rewards/accuracies": 0.637499988079071,
586
+ "rewards/chosen": 0.0894167423248291,
587
+ "rewards/margins": 0.09835406392812729,
588
+ "rewards/rejected": -0.008937308564782143,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
+ "logits/chosen": -1.9087949991226196,
595
+ "logits/rejected": -1.9110119342803955,
596
+ "logps/chosen": -33.8967399597168,
597
+ "logps/rejected": -31.023351669311523,
598
+ "loss": 0.8526,
599
+ "rewards/accuracies": 0.737500011920929,
600
+ "rewards/chosen": 0.10269282758235931,
601
+ "rewards/margins": 0.1479099541902542,
602
+ "rewards/rejected": -0.0452171191573143,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
+ "train_loss": 0.9217340717067967,
610
+ "train_runtime": 3251.2729,
611
  "train_samples_per_second": 0.947,
612
  "train_steps_per_second": 0.118
613
  }