hugodk-sch committed
Commit 6ba2f01
1 Parent(s): e681b49

Model save

README.md CHANGED
@@ -1,13 +1,11 @@
  ---
  library_name: peft
  tags:
- - alignment-handbook
  - trl
  - dpo
+ - alignment-handbook
  - generated_from_trainer
  base_model: NbAiLab/nb-gpt-j-6B-v2
- datasets:
- - hugodk-sch/aftonposten_title_prefs
  model-index:
  - name: aftonposten-6b-align-scan
    results: []
@@ -18,17 +16,17 @@ should probably proofread and complete it, then remove this comment. -->

  # aftonposten-6b-align-scan

- This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
+ This model is a fine-tuned version of [NbAiLab/nb-gpt-j-6B-v2](https://huggingface.co/NbAiLab/nb-gpt-j-6B-v2) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.4422
- - Rewards/chosen: 0.0026
- - Rewards/rejected: 0.0097
- - Rewards/accuracies: 0.4568
- - Rewards/margins: -0.0071
- - Logps/rejected: -37.5045
- - Logps/chosen: -34.0313
- - Logits/rejected: -2.2388
- - Logits/chosen: -2.2437
+ - Logits/chosen: -2.2435
+ - Logits/rejected: -2.2386
+ - Logps/chosen: -34.0177
+ - Logps/rejected: -37.5002
+ - Loss: 0.9998
+ - Rewards/accuracies: 0.4904
+ - Rewards/chosen: 0.0135
+ - Rewards/margins: 0.0004
+ - Rewards/rejected: 0.0132

  ## Model description

@@ -63,9 +61,9 @@ The following hyperparameters were used during training:

  | Training Loss | Epoch | Step | Logits/chosen | Logits/rejected | Logps/chosen | Logps/rejected | Validation Loss | Rewards/accuracies | Rewards/chosen | Rewards/margins | Rewards/rejected |
  |:-------------:|:-----:|:----:|:-------------:|:---------------:|:------------:|:--------------:|:---------------:|:------------------:|:--------------:|:---------------:|:----------------:|
- | 0.3805 | 0.26 | 100 | -2.2441 | -2.2393 | -34.0171 | -37.5145 | 0.4061 | 0.5395 | 0.0139 | 0.0122 | 0.0017 |
- | 0.371 | 0.52 | 200 | -2.2439 | -2.2390 | -34.0231 | -37.5019 | 0.4323 | 0.5253 | 0.0091 | -0.0027 | 0.0118 |
- | 0.3839 | 0.78 | 300 | -2.2436 | -2.2387 | -34.0238 | -37.5004 | 0.4345 | 0.4809 | 0.0086 | -0.0044 | 0.0130 |
+ | 1.0026 | 0.26 | 100 | -2.2433 | -2.2384 | -34.0232 | -37.4972 | 1.0066 | 0.4983 | 0.0091 | -0.0064 | 0.0155 |
+ | 0.9923 | 0.52 | 200 | -2.2438 | -2.2389 | -34.0187 | -37.5175 | 0.9864 | 0.5328 | 0.0127 | 0.0134 | -0.0007 |
+ | 0.9775 | 0.78 | 300 | -2.2435 | -2.2386 | -34.0177 | -37.5002 | 0.9998 | 0.4904 | 0.0135 | 0.0004 | 0.0132 |


  ### Framework versions
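The updated card records the base model and the PEFT/TRL DPO tags and evaluation metrics. As a usage illustration only (the adapter repo id `hugodk-sch/aftonposten-6b-align-scan` is inferred from the model name and committer, not stated in the diff), the adapter could be loaded on top of the base model roughly like this:

```python
# Minimal sketch, assuming recent transformers + peft installs and that the
# adapter lives at the (assumed) repo id "hugodk-sch/aftonposten-6b-align-scan".
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "NbAiLab/nb-gpt-j-6B-v2"                   # base model named in the card
adapter_id = "hugodk-sch/aftonposten-6b-align-scan"  # assumed, not stated in the diff

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id)
model = PeftModel.from_pretrained(base, adapter_id)  # attaches the LoRA adapter weights

inputs = tokenizer("Hello", return_tensors="pt")
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=20)[0]))
```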
adapter_config.json CHANGED
@@ -19,13 +19,13 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "v_proj",
+ "k_proj",
  "down_proj",
- "up_proj",
  "o_proj",
  "gate_proj",
- "q_proj",
- "k_proj"
+ "up_proj",
+ "v_proj",
+ "q_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_rslora": false
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dd4d4dffa9b30971c702f1f39924c707a8b1925a5d67f8de841d1128fe03486b
+ oid sha256:726700f20fcb28f7858cd1aa59e0cb2bd53a0fe4b7d2b662333531d42f0321bf
  size 176183216
all_results.json CHANGED
@@ -1,20 +1,20 @@
  {
  "epoch": 1.0,
- "eval_logits/chosen": -2.24367356300354,
- "eval_logits/rejected": -2.2387921810150146,
- "eval_logps/chosen": -34.03134536743164,
- "eval_logps/rejected": -37.504520416259766,
- "eval_loss": 0.4422106444835663,
- "eval_rewards/accuracies": 0.4568106234073639,
- "eval_rewards/chosen": 0.002567538060247898,
- "eval_rewards/margins": -0.0071130492724478245,
- "eval_rewards/rejected": 0.009680588729679585,
- "eval_runtime": 145.9043,
+ "eval_logits/chosen": -2.2434139251708984,
+ "eval_logits/rejected": -2.2385354042053223,
+ "eval_logps/chosen": -34.0224494934082,
+ "eval_logps/rejected": -37.51002883911133,
+ "eval_loss": 0.9960440397262573,
+ "eval_rewards/accuracies": 0.5141196250915527,
+ "eval_rewards/chosen": 0.0096820630133152,
+ "eval_rewards/margins": 0.004409347660839558,
+ "eval_rewards/rejected": 0.005272718146443367,
+ "eval_runtime": 145.6562,
  "eval_samples": 343,
- "eval_samples_per_second": 2.351,
+ "eval_samples_per_second": 2.355,
  "eval_steps_per_second": 0.295,
- "train_loss": 0.08930327675559303,
- "train_runtime": 627.8357,
+ "train_loss": 0.21957750196580764,
+ "train_runtime": 627.8531,
  "train_samples": 3079,
  "train_samples_per_second": 4.904,
  "train_steps_per_second": 0.613
eval_results.json CHANGED
@@ -1,16 +1,16 @@
  {
  "epoch": 1.0,
- "eval_logits/chosen": -2.24367356300354,
- "eval_logits/rejected": -2.2387921810150146,
- "eval_logps/chosen": -34.03134536743164,
- "eval_logps/rejected": -37.504520416259766,
- "eval_loss": 0.4422106444835663,
- "eval_rewards/accuracies": 0.4568106234073639,
- "eval_rewards/chosen": 0.002567538060247898,
- "eval_rewards/margins": -0.0071130492724478245,
- "eval_rewards/rejected": 0.009680588729679585,
- "eval_runtime": 145.9043,
+ "eval_logits/chosen": -2.2434139251708984,
+ "eval_logits/rejected": -2.2385354042053223,
+ "eval_logps/chosen": -34.0224494934082,
+ "eval_logps/rejected": -37.51002883911133,
+ "eval_loss": 0.9960440397262573,
+ "eval_rewards/accuracies": 0.5141196250915527,
+ "eval_rewards/chosen": 0.0096820630133152,
+ "eval_rewards/margins": 0.004409347660839558,
+ "eval_rewards/rejected": 0.005272718146443367,
+ "eval_runtime": 145.6562,
  "eval_samples": 343,
- "eval_samples_per_second": 2.351,
+ "eval_samples_per_second": 2.355,
  "eval_steps_per_second": 0.295
  }
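eval_results.json repeats the evaluation block of all_results.json. One relation worth noting: in TRL's DPO-style logging the reported margin is the mean difference between chosen and rejected rewards, and the values above agree with that up to the rounding of separately averaged quantities:

```python
# rewards/margins ~= rewards/chosen - rewards/rejected (values from eval_results.json above).
chosen, rejected = 0.0096820630133152, 0.005272718146443367
print(chosen - rejected)  # ~0.0044, matching eval_rewards/margins (0.004409...)
```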
train_results.json CHANGED
@@ -1,7 +1,7 @@
  {
  "epoch": 1.0,
- "train_loss": 0.08930327675559303,
- "train_runtime": 627.8357,
+ "train_loss": 0.21957750196580764,
+ "train_runtime": 627.8531,
  "train_samples": 3079,
  "train_samples_per_second": 4.904,
  "train_steps_per_second": 0.613
trainer_state.json CHANGED
@@ -15,7 +15,7 @@
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
- "loss": 0.3906,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
@@ -25,311 +25,311 @@
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.2820512820512818e-07,
28
- "logits/chosen": -1.8662992715835571,
29
- "logits/rejected": -1.8706117868423462,
30
- "logps/chosen": -36.97681427001953,
31
- "logps/rejected": -33.66523361206055,
32
- "loss": 0.3447,
33
- "rewards/accuracies": 0.5416666865348816,
34
- "rewards/chosen": 0.023829741403460503,
35
- "rewards/margins": 0.05195777863264084,
36
- "rewards/rejected": -0.028128040954470634,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.5641025641025636e-07,
42
- "logits/chosen": -1.997193694114685,
43
- "logits/rejected": -1.9998573064804077,
44
- "logps/chosen": -29.65359878540039,
45
- "logps/rejected": -29.054311752319336,
46
- "loss": 0.4534,
47
  "rewards/accuracies": 0.4375,
48
- "rewards/chosen": -0.009124360978603363,
49
- "rewards/margins": -0.016431041061878204,
50
- "rewards/rejected": 0.007306680083274841,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.8461538461538463e-07,
56
- "logits/chosen": -1.9197280406951904,
57
- "logits/rejected": -1.9170429706573486,
58
- "logps/chosen": -31.414775848388672,
59
- "logps/rejected": -33.24064254760742,
60
- "loss": 0.4025,
61
- "rewards/accuracies": 0.5625,
62
- "rewards/chosen": 0.0010542668169364333,
63
- "rewards/margins": 0.018799806013703346,
64
- "rewards/rejected": -0.01774553768336773,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438433e-07,
70
- "logits/chosen": -2.0171010494232178,
71
- "logits/rejected": -2.008350372314453,
72
- "logps/chosen": -32.59648513793945,
73
- "logps/rejected": -32.50862121582031,
74
- "loss": 0.4532,
75
  "rewards/accuracies": 0.44999998807907104,
76
- "rewards/chosen": -0.015740757808089256,
77
- "rewards/margins": -0.018979396671056747,
78
- "rewards/rejected": 0.0032386414241045713,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542186e-07,
84
- "logits/chosen": -1.8645904064178467,
85
- "logits/rejected": -1.8538070917129517,
86
- "logps/chosen": -33.542823791503906,
87
- "logps/rejected": -35.43744659423828,
88
- "loss": 0.4304,
89
- "rewards/accuracies": 0.5,
90
- "rewards/chosen": 0.011724205687642097,
91
- "rewards/margins": -0.00019102543592453003,
92
- "rewards/rejected": 0.011915231123566628,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941118e-07,
98
- "logits/chosen": -1.9461469650268555,
99
- "logits/rejected": -1.948094129562378,
100
- "logps/chosen": -32.587215423583984,
101
- "logps/rejected": -33.21445846557617,
102
- "loss": 0.3539,
103
- "rewards/accuracies": 0.637499988079071,
104
- "rewards/chosen": 0.010374858975410461,
105
- "rewards/margins": 0.04483799636363983,
106
- "rewards/rejected": -0.03446313738822937,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413548e-07,
112
- "logits/chosen": -2.079591989517212,
113
- "logits/rejected": -2.0845742225646973,
114
- "logps/chosen": -34.00908279418945,
115
- "logps/rejected": -36.58150863647461,
116
- "loss": 0.4506,
117
  "rewards/accuracies": 0.512499988079071,
118
- "rewards/chosen": -0.01648041605949402,
119
- "rewards/margins": -0.006370754446834326,
120
- "rewards/rejected": -0.010109663009643555,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-07,
126
- "logits/chosen": -1.9423307180404663,
127
- "logits/rejected": -1.9454963207244873,
128
- "logps/chosen": -34.39698028564453,
129
- "logps/rejected": -34.59761428833008,
130
- "loss": 0.3889,
131
  "rewards/accuracies": 0.5249999761581421,
132
- "rewards/chosen": 0.024663064628839493,
133
- "rewards/margins": 0.028829574584960938,
134
- "rewards/rejected": -0.00416650902479887,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.736716601303429e-07,
140
- "logits/chosen": -1.9508873224258423,
141
- "logits/rejected": -1.9553953409194946,
142
- "logps/chosen": -32.48583221435547,
143
- "logps/rejected": -32.35867691040039,
144
- "loss": 0.4445,
145
- "rewards/accuracies": 0.4625000059604645,
146
- "rewards/chosen": -0.014726865105330944,
147
- "rewards/margins": -0.014909917488694191,
148
- "rewards/rejected": 0.00018304325931239873,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.62624545834521e-07,
154
- "logits/chosen": -2.0491719245910645,
155
- "logits/rejected": -2.047179937362671,
156
- "logps/chosen": -32.22635269165039,
157
- "logps/rejected": -31.287487030029297,
158
- "loss": 0.3805,
159
- "rewards/accuracies": 0.550000011920929,
160
- "rewards/chosen": 0.011584864929318428,
161
- "rewards/margins": 0.02860759198665619,
162
- "rewards/rejected": -0.01702272891998291,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
- "eval_logits/chosen": -2.244148015975952,
168
- "eval_logits/rejected": -2.2392663955688477,
169
- "eval_logps/chosen": -34.01713943481445,
170
- "eval_logps/rejected": -37.514495849609375,
171
- "eval_loss": 0.4061162769794464,
172
- "eval_rewards/accuracies": 0.5394518375396729,
173
- "eval_rewards/chosen": 0.013928660191595554,
174
- "eval_rewards/margins": 0.012230273336172104,
175
- "eval_rewards/rejected": 0.0016983875539153814,
176
- "eval_runtime": 146.1091,
177
- "eval_samples_per_second": 2.348,
178
  "eval_steps_per_second": 0.294,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.4982572012636904e-07,
184
- "logits/chosen": -2.005580186843872,
185
- "logits/rejected": -2.0031564235687256,
186
- "logps/chosen": -33.24415588378906,
187
- "logps/rejected": -33.99993133544922,
188
- "loss": 0.4792,
189
- "rewards/accuracies": 0.4375,
190
- "rewards/chosen": -0.00026987865567207336,
191
- "rewards/margins": -0.02382112666964531,
192
- "rewards/rejected": 0.023551244288682938,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777677e-07,
198
- "logits/chosen": -2.01680326461792,
199
- "logits/rejected": -2.0084121227264404,
200
- "logps/chosen": -32.45465850830078,
201
- "logps/rejected": -32.1729850769043,
202
- "loss": 0.4286,
203
- "rewards/accuracies": 0.4375,
204
- "rewards/chosen": -0.007803081069141626,
205
- "rewards/margins": 0.0013800703454762697,
206
- "rewards/rejected": -0.009183152578771114,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.194082707715275e-07,
212
- "logits/chosen": -2.0466856956481934,
213
- "logits/rejected": -2.0386409759521484,
214
- "logps/chosen": -30.494335174560547,
215
- "logps/rejected": -32.051963806152344,
216
- "loss": 0.4497,
217
- "rewards/accuracies": 0.4749999940395355,
218
- "rewards/chosen": -0.014895597472786903,
219
- "rewards/margins": -0.015729816630482674,
220
- "rewards/rejected": 0.0008342192741110921,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.020402418666621e-07,
226
- "logits/chosen": -1.977065086364746,
227
- "logits/rejected": -1.987343430519104,
228
- "logps/chosen": -31.37868309020996,
229
- "logps/rejected": -32.54730224609375,
230
- "loss": 0.3589,
231
- "rewards/accuracies": 0.574999988079071,
232
- "rewards/chosen": 0.03278205543756485,
233
- "rewards/margins": 0.04271895810961723,
234
- "rewards/rejected": -0.009936909191310406,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.8341962650351185e-07,
240
- "logits/chosen": -1.8916466236114502,
241
- "logits/rejected": -1.8927490711212158,
242
- "logps/chosen": -34.209651947021484,
243
- "logps/rejected": -34.766143798828125,
244
- "loss": 0.4325,
245
- "rewards/accuracies": 0.42500001192092896,
246
- "rewards/chosen": -0.013569498434662819,
247
- "rewards/margins": -0.00645996630191803,
248
- "rewards/rejected": -0.007109532598406076,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800572e-07,
254
- "logits/chosen": -1.9427303075790405,
255
- "logits/rejected": -1.939252495765686,
256
- "logps/chosen": -36.14452362060547,
257
- "logps/rejected": -32.73284149169922,
258
- "loss": 0.3796,
259
- "rewards/accuracies": 0.550000011920929,
260
- "rewards/chosen": 0.021723434329032898,
261
- "rewards/margins": 0.02979486621916294,
262
- "rewards/rejected": -0.008071433752775192,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.430433172111807e-07,
268
- "logits/chosen": -2.0424513816833496,
269
- "logits/rejected": -2.035060405731201,
270
- "logps/chosen": -33.786170959472656,
271
- "logps/rejected": -31.34820556640625,
272
- "loss": 0.4172,
273
- "rewards/accuracies": 0.512499988079071,
274
- "rewards/chosen": 0.009350698441267014,
275
- "rewards/margins": 0.006434415467083454,
276
- "rewards/rejected": 0.0029162843711674213,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.216202642830543e-07,
282
- "logits/chosen": -2.0475661754608154,
283
- "logits/rejected": -2.052834987640381,
284
- "logps/chosen": -32.528114318847656,
285
- "logps/rejected": -32.50902557373047,
286
- "loss": 0.3766,
287
- "rewards/accuracies": 0.4749999940395355,
288
- "rewards/chosen": 0.013338183984160423,
289
- "rewards/margins": 0.03244578838348389,
290
- "rewards/rejected": -0.019107606261968613,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.9960716642946403e-07,
296
- "logits/chosen": -2.048661231994629,
297
- "logits/rejected": -2.045872926712036,
298
- "logps/chosen": -31.496755599975586,
299
- "logps/rejected": -31.314464569091797,
300
- "loss": 0.4475,
301
  "rewards/accuracies": 0.5,
302
- "rewards/chosen": -0.00624021515250206,
303
- "rewards/margins": -0.009242123924195766,
304
- "rewards/rejected": 0.003001909703016281,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.771853789806683e-07,
310
- "logits/chosen": -1.9193611145019531,
311
- "logits/rejected": -1.9240529537200928,
312
- "logps/chosen": -31.579212188720703,
313
- "logps/rejected": -32.80603790283203,
314
- "loss": 0.371,
315
- "rewards/accuracies": 0.6000000238418579,
316
- "rewards/chosen": 0.02165621519088745,
317
- "rewards/margins": 0.033739686012268066,
318
- "rewards/rejected": -0.012083468958735466,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
- "eval_logits/chosen": -2.243856191635132,
324
- "eval_logits/rejected": -2.2389791011810303,
325
- "eval_logps/chosen": -34.023136138916016,
326
- "eval_logps/rejected": -37.501853942871094,
327
- "eval_loss": 0.4322855770587921,
328
- "eval_rewards/accuracies": 0.5253322720527649,
329
- "eval_rewards/chosen": 0.009133166633546352,
330
- "eval_rewards/margins": -0.002679171971976757,
331
- "eval_rewards/rejected": 0.01181233860552311,
332
- "eval_runtime": 145.9618,
333
  "eval_samples_per_second": 2.35,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
@@ -337,277 +337,277 @@
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402e-07,
340
- "logits/chosen": -2.0320448875427246,
341
- "logits/rejected": -2.0427393913269043,
342
- "logps/chosen": -31.947372436523438,
343
- "logps/rejected": -33.899864196777344,
344
- "loss": 0.3622,
345
- "rewards/accuracies": 0.6000000238418579,
346
- "rewards/chosen": 0.019143681973218918,
347
- "rewards/margins": 0.04887578636407852,
348
- "rewards/rejected": -0.029732098802924156,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.318564697655179e-07,
354
- "logits/chosen": -1.9257709980010986,
355
- "logits/rejected": -1.940639853477478,
356
- "logps/chosen": -30.079341888427734,
357
- "logps/rejected": -31.5616397857666,
358
- "loss": 0.3884,
359
- "rewards/accuracies": 0.612500011920929,
360
- "rewards/chosen": 0.025758206844329834,
361
- "rewards/margins": 0.028527002781629562,
362
- "rewards/rejected": -0.0027687971014529467,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.093227910899832e-07,
368
- "logits/chosen": -1.9829959869384766,
369
- "logits/rejected": -1.9869670867919922,
370
- "logps/chosen": -33.4053955078125,
371
- "logps/rejected": -31.562353134155273,
372
- "loss": 0.3918,
373
- "rewards/accuracies": 0.5874999761581421,
374
- "rewards/chosen": 0.009657363407313824,
375
- "rewards/margins": 0.02010050043463707,
376
- "rewards/rejected": -0.010443134233355522,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279356e-07,
382
- "logits/chosen": -1.9836658239364624,
383
- "logits/rejected": -1.9616800546646118,
384
- "logps/chosen": -34.175201416015625,
385
- "logps/rejected": -34.966102600097656,
386
- "loss": 0.4346,
387
- "rewards/accuracies": 0.5375000238418579,
388
- "rewards/chosen": -0.017159918323159218,
389
- "rewards/margins": -0.004913450218737125,
390
- "rewards/rejected": -0.012246469967067242,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.654436768970182e-07,
396
- "logits/chosen": -2.0244596004486084,
397
- "logits/rejected": -2.021144390106201,
398
- "logps/chosen": -32.932373046875,
399
- "logps/rejected": -36.2264289855957,
400
- "loss": 0.4397,
401
- "rewards/accuracies": 0.512499988079071,
402
- "rewards/chosen": -0.005845203995704651,
403
- "rewards/margins": -0.004640025552362204,
404
- "rewards/rejected": -0.001205177279189229,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.444597403062196e-07,
410
- "logits/chosen": -1.891283392906189,
411
- "logits/rejected": -1.8888483047485352,
412
- "logps/chosen": -34.20048522949219,
413
- "logps/rejected": -35.507022857666016,
414
- "loss": 0.4441,
415
- "rewards/accuracies": 0.5625,
416
- "rewards/chosen": -0.009669994935393333,
417
- "rewards/margins": -0.0077504729852080345,
418
- "rewards/rejected": -0.0019195213681086898,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.2434529917578887e-07,
424
- "logits/chosen": -1.8760111331939697,
425
- "logits/rejected": -1.8734772205352783,
426
- "logps/chosen": -34.39020919799805,
427
- "logps/rejected": -31.75579261779785,
428
- "loss": 0.4295,
429
- "rewards/accuracies": 0.42500001192092896,
430
- "rewards/chosen": 0.0059113698080182076,
431
- "rewards/margins": -0.006519269198179245,
432
- "rewards/rejected": 0.012430639937520027,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603521e-07,
438
- "logits/chosen": -1.9797089099884033,
439
- "logits/rejected": -1.9690707921981812,
440
- "logps/chosen": -35.31258773803711,
441
- "logps/rejected": -31.837697982788086,
442
- "loss": 0.3868,
443
- "rewards/accuracies": 0.5249999761581421,
444
- "rewards/chosen": 0.03524213656783104,
445
- "rewards/margins": 0.0326063297688961,
446
- "rewards/rejected": 0.0026358035393059254,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071453e-08,
452
- "logits/chosen": -2.0755209922790527,
453
- "logits/rejected": -2.060497760772705,
454
- "logps/chosen": -30.90865135192871,
455
- "logps/rejected": -32.64521026611328,
456
- "loss": 0.41,
457
- "rewards/accuracies": 0.44999998807907104,
458
- "rewards/chosen": 0.012379061430692673,
459
- "rewards/margins": 0.007080032490193844,
460
- "rewards/rejected": 0.005299028940498829,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-08,
466
- "logits/chosen": -1.9463651180267334,
467
- "logits/rejected": -1.9438308477401733,
468
- "logps/chosen": -32.89823532104492,
469
- "logps/rejected": -30.81850814819336,
470
- "loss": 0.3839,
471
- "rewards/accuracies": 0.5874999761581421,
472
- "rewards/chosen": 0.02309919334948063,
473
- "rewards/margins": 0.024458223953843117,
474
- "rewards/rejected": -0.0013590289745479822,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
- "eval_logits/chosen": -2.2435548305511475,
480
- "eval_logits/rejected": -2.2386720180511475,
481
- "eval_logps/chosen": -34.023826599121094,
482
- "eval_logps/rejected": -37.50039291381836,
483
- "eval_loss": 0.4344586431980133,
484
- "eval_rewards/accuracies": 0.4808970093727112,
485
- "eval_rewards/chosen": 0.00858243927359581,
486
- "eval_rewards/margins": -0.004399063065648079,
487
- "eval_rewards/rejected": 0.012981505133211613,
488
- "eval_runtime": 145.9473,
489
- "eval_samples_per_second": 2.35,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589034e-08,
496
- "logits/chosen": -1.9286285638809204,
497
- "logits/rejected": -1.9253568649291992,
498
- "logps/chosen": -31.583232879638672,
499
- "logps/rejected": -33.75123596191406,
500
- "loss": 0.3881,
501
- "rewards/accuracies": 0.5375000238418579,
502
- "rewards/chosen": 0.016091840341687202,
503
- "rewards/margins": 0.023359118029475212,
504
- "rewards/rejected": -0.007267280016094446,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380912e-08,
510
- "logits/chosen": -1.9803975820541382,
511
- "logits/rejected": -1.9680900573730469,
512
- "logps/chosen": -34.58079147338867,
513
- "logps/rejected": -33.575767517089844,
514
- "loss": 0.3778,
515
  "rewards/accuracies": 0.5375000238418579,
516
- "rewards/chosen": 0.012225466780364513,
517
- "rewards/margins": 0.041770923882722855,
518
- "rewards/rejected": -0.029545456171035767,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-08,
524
- "logits/chosen": -2.015634298324585,
525
- "logits/rejected": -2.0141713619232178,
526
- "logps/chosen": -33.45996856689453,
527
- "logps/rejected": -32.48029327392578,
528
- "loss": 0.4032,
529
- "rewards/accuracies": 0.5625,
530
- "rewards/chosen": 0.020660031586885452,
531
- "rewards/margins": 0.021225089207291603,
532
- "rewards/rejected": -0.0005650619277730584,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.05793773749158e-08,
538
- "logits/chosen": -2.1035873889923096,
539
- "logits/rejected": -2.0877768993377686,
540
- "logps/chosen": -34.162208557128906,
541
- "logps/rejected": -33.095733642578125,
542
- "loss": 0.439,
543
- "rewards/accuracies": 0.4625000059604645,
544
- "rewards/chosen": 0.013746557757258415,
545
- "rewards/margins": -0.004091509617865086,
546
- "rewards/rejected": 0.017838066443800926,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.251801807404168e-08,
552
- "logits/chosen": -1.9746344089508057,
553
- "logits/rejected": -1.9736888408660889,
554
- "logps/chosen": -33.253448486328125,
555
- "logps/rejected": -32.458499908447266,
556
- "loss": 0.3954,
557
- "rewards/accuracies": 0.5249999761581421,
558
- "rewards/chosen": 0.02291743829846382,
559
- "rewards/margins": 0.017600122839212418,
560
- "rewards/rejected": 0.005317316390573978,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-09,
566
- "logits/chosen": -1.9309253692626953,
567
- "logits/rejected": -1.941303014755249,
568
- "logps/chosen": -32.21305465698242,
569
- "logps/rejected": -35.309688568115234,
570
- "loss": 0.4095,
571
- "rewards/accuracies": 0.42500001192092896,
572
- "rewards/chosen": -0.006690672133117914,
573
- "rewards/margins": 0.010053041391074657,
574
- "rewards/rejected": -0.01674371212720871,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050324e-09,
580
- "logits/chosen": -2.069108009338379,
581
- "logits/rejected": -2.0625574588775635,
582
- "logps/chosen": -33.637176513671875,
583
- "logps/rejected": -29.221187591552734,
584
- "loss": 0.4059,
585
- "rewards/accuracies": 0.48750001192092896,
586
- "rewards/chosen": 0.011833530850708485,
587
- "rewards/margins": 0.00764369685202837,
588
- "rewards/rejected": 0.004189834464341402,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-10,
594
- "logits/chosen": -1.9292182922363281,
595
- "logits/rejected": -1.9313886165618896,
596
- "logps/chosen": -34.24732971191406,
597
- "logps/rejected": -30.893402099609375,
598
- "loss": 0.4086,
599
- "rewards/accuracies": 0.4749999940395355,
600
- "rewards/chosen": -0.006622877903282642,
601
- "rewards/margins": 0.009996414184570312,
602
- "rewards/rejected": -0.01661929115653038,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
- "train_loss": 0.08930327675559303,
610
- "train_runtime": 627.8357,
611
  "train_samples_per_second": 4.904,
612
  "train_steps_per_second": 0.613
613
  }
 
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
+ "loss": 1.0,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
 
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.2820512820512818e-07,
28
+ "logits/chosen": -1.866065502166748,
29
+ "logits/rejected": -1.8703795671463013,
30
+ "logps/chosen": -36.988380432128906,
31
+ "logps/rejected": -33.66728210449219,
32
+ "loss": 0.9557,
33
+ "rewards/accuracies": 0.5694444179534912,
34
+ "rewards/chosen": 0.014575847424566746,
35
+ "rewards/margins": 0.04434243589639664,
36
+ "rewards/rejected": -0.02976658195257187,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.5641025641025636e-07,
42
+ "logits/chosen": -1.997332215309143,
43
+ "logits/rejected": -1.999983549118042,
44
+ "logps/chosen": -29.625896453857422,
45
+ "logps/rejected": -29.035802841186523,
46
+ "loss": 1.0091,
47
  "rewards/accuracies": 0.4375,
48
+ "rewards/chosen": 0.013037634082138538,
49
+ "rewards/margins": -0.009074507281184196,
50
+ "rewards/rejected": 0.02211214043200016,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.8461538461538463e-07,
56
+ "logits/chosen": -1.9199495315551758,
57
+ "logits/rejected": -1.917249321937561,
58
+ "logps/chosen": -31.421478271484375,
59
+ "logps/rejected": -33.2115364074707,
60
+ "loss": 1.0098,
61
+ "rewards/accuracies": 0.4625000059604645,
62
+ "rewards/chosen": -0.004308355040848255,
63
+ "rewards/margins": -0.009847955778241158,
64
+ "rewards/rejected": 0.005539602134376764,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438433e-07,
70
+ "logits/chosen": -2.0169284343719482,
71
+ "logits/rejected": -2.008178949356079,
72
+ "logps/chosen": -32.59435272216797,
73
+ "logps/rejected": -32.49193572998047,
74
+ "loss": 1.0306,
75
  "rewards/accuracies": 0.44999998807907104,
76
+ "rewards/chosen": -0.014034454710781574,
77
+ "rewards/margins": -0.030623534694314003,
78
+ "rewards/rejected": 0.016589080914855003,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542186e-07,
84
+ "logits/chosen": -1.86457097530365,
85
+ "logits/rejected": -1.8537908792495728,
86
+ "logps/chosen": -33.56566619873047,
87
+ "logps/rejected": -35.423240661621094,
88
+ "loss": 1.0298,
89
+ "rewards/accuracies": 0.4375,
90
+ "rewards/chosen": -0.006550998892635107,
91
+ "rewards/margins": -0.029834842309355736,
92
+ "rewards/rejected": 0.02328384295105934,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941118e-07,
98
+ "logits/chosen": -1.9449050426483154,
99
+ "logits/rejected": -1.9468472003936768,
100
+ "logps/chosen": -32.59955596923828,
101
+ "logps/rejected": -33.1828498840332,
102
+ "loss": 0.9903,
103
+ "rewards/accuracies": 0.512499988079071,
104
+ "rewards/chosen": 0.0005022765835747123,
105
+ "rewards/margins": 0.009675229899585247,
106
+ "rewards/rejected": -0.00917295552790165,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413548e-07,
112
+ "logits/chosen": -2.079878330230713,
113
+ "logits/rejected": -2.084862232208252,
114
+ "logps/chosen": -33.98878860473633,
115
+ "logps/rejected": -36.574462890625,
116
+ "loss": 0.9958,
117
  "rewards/accuracies": 0.512499988079071,
118
+ "rewards/chosen": -0.00024233898147940636,
119
+ "rewards/margins": 0.004231657367199659,
120
+ "rewards/rejected": -0.004473996348679066,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-07,
126
+ "logits/chosen": -1.9425569772720337,
127
+ "logits/rejected": -1.9457191228866577,
128
+ "logps/chosen": -34.40068054199219,
129
+ "logps/rejected": -34.5762939453125,
130
+ "loss": 0.9912,
131
  "rewards/accuracies": 0.5249999761581421,
132
+ "rewards/chosen": 0.021701809018850327,
133
+ "rewards/margins": 0.00881027989089489,
134
+ "rewards/rejected": 0.012891530990600586,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.736716601303429e-07,
140
+ "logits/chosen": -1.9507396221160889,
141
+ "logits/rejected": -1.9552500247955322,
142
+ "logps/chosen": -32.460357666015625,
143
+ "logps/rejected": -32.354434967041016,
144
+ "loss": 0.9979,
145
+ "rewards/accuracies": 0.512499988079071,
146
+ "rewards/chosen": 0.005654479376971722,
147
+ "rewards/margins": 0.002074276329949498,
148
+ "rewards/rejected": 0.0035802037455141544,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.62624545834521e-07,
154
+ "logits/chosen": -2.0492873191833496,
155
+ "logits/rejected": -2.0472888946533203,
156
+ "logps/chosen": -32.23810958862305,
157
+ "logps/rejected": -31.260278701782227,
158
+ "loss": 1.0026,
159
+ "rewards/accuracies": 0.4625000059604645,
160
+ "rewards/chosen": 0.0021828364115208387,
161
+ "rewards/margins": -0.002563956892117858,
162
+ "rewards/rejected": 0.004746791906654835,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
+ "eval_logits/chosen": -2.243312358856201,
168
+ "eval_logits/rejected": -2.238436222076416,
169
+ "eval_logps/chosen": -34.023216247558594,
170
+ "eval_logps/rejected": -37.49723434448242,
171
+ "eval_loss": 1.0065712928771973,
172
+ "eval_rewards/accuracies": 0.49833887815475464,
173
+ "eval_rewards/chosen": 0.009070919826626778,
174
+ "eval_rewards/margins": -0.006438401062041521,
175
+ "eval_rewards/rejected": 0.015509321354329586,
176
+ "eval_runtime": 146.1739,
177
+ "eval_samples_per_second": 2.347,
178
  "eval_steps_per_second": 0.294,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.4982572012636904e-07,
184
+ "logits/chosen": -2.005169153213501,
185
+ "logits/rejected": -2.0027499198913574,
186
+ "logps/chosen": -33.2365837097168,
187
+ "logps/rejected": -34.01953125,
188
+ "loss": 1.0021,
189
+ "rewards/accuracies": 0.4625000059604645,
190
+ "rewards/chosen": 0.005784005858004093,
191
+ "rewards/margins": -0.002089007291942835,
192
+ "rewards/rejected": 0.007873011752963066,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777677e-07,
198
+ "logits/chosen": -2.0166728496551514,
199
+ "logits/rejected": -2.0083022117614746,
200
+ "logps/chosen": -32.457847595214844,
201
+ "logps/rejected": -32.18357467651367,
202
+ "loss": 0.9927,
203
+ "rewards/accuracies": 0.550000011920929,
204
+ "rewards/chosen": -0.010353903286159039,
205
+ "rewards/margins": 0.0072973729111254215,
206
+ "rewards/rejected": -0.017651278525590897,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.194082707715275e-07,
212
+ "logits/chosen": -2.0462448596954346,
213
+ "logits/rejected": -2.038203001022339,
214
+ "logps/chosen": -30.475027084350586,
215
+ "logps/rejected": -32.046302795410156,
216
+ "loss": 1.0048,
217
+ "rewards/accuracies": 0.48750001192092896,
218
+ "rewards/chosen": 0.0005506269517354667,
219
+ "rewards/margins": -0.004812211729586124,
220
+ "rewards/rejected": 0.005362839438021183,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.020402418666621e-07,
226
+ "logits/chosen": -1.976986289024353,
227
+ "logits/rejected": -1.9872560501098633,
228
+ "logps/chosen": -31.407278060913086,
229
+ "logps/rejected": -32.543296813964844,
230
+ "loss": 0.9834,
231
+ "rewards/accuracies": 0.48750001192092896,
232
+ "rewards/chosen": 0.009907384403049946,
233
+ "rewards/margins": 0.016641682013869286,
234
+ "rewards/rejected": -0.006734299007803202,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.8341962650351185e-07,
240
+ "logits/chosen": -1.8905508518218994,
241
+ "logits/rejected": -1.891632080078125,
242
+ "logps/chosen": -34.20501708984375,
243
+ "logps/rejected": -34.77235412597656,
244
+ "loss": 0.9978,
245
+ "rewards/accuracies": 0.4625000059604645,
246
+ "rewards/chosen": -0.009861050173640251,
247
+ "rewards/margins": 0.0022162157110869884,
248
+ "rewards/rejected": -0.012077265419065952,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800572e-07,
254
+ "logits/chosen": -1.94281005859375,
255
+ "logits/rejected": -1.939327597618103,
256
+ "logps/chosen": -36.144107818603516,
257
+ "logps/rejected": -32.72822570800781,
258
+ "loss": 0.9736,
259
+ "rewards/accuracies": 0.5375000238418579,
260
+ "rewards/chosen": 0.02205902896821499,
261
+ "rewards/margins": 0.026437148451805115,
262
+ "rewards/rejected": -0.004378115758299828,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.430433172111807e-07,
268
+ "logits/chosen": -2.0420708656311035,
269
+ "logits/rejected": -2.0346803665161133,
270
+ "logps/chosen": -33.771934509277344,
271
+ "logps/rejected": -31.371145248413086,
272
+ "loss": 0.9638,
273
+ "rewards/accuracies": 0.5375000238418579,
274
+ "rewards/chosen": 0.020745161920785904,
275
+ "rewards/margins": 0.036180444061756134,
276
+ "rewards/rejected": -0.015435280278325081,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.216202642830543e-07,
282
+ "logits/chosen": -2.0475738048553467,
283
+ "logits/rejected": -2.0528526306152344,
284
+ "logps/chosen": -32.524593353271484,
285
+ "logps/rejected": -32.510643005371094,
286
+ "loss": 0.9634,
287
+ "rewards/accuracies": 0.550000011920929,
288
+ "rewards/chosen": 0.016154326498508453,
289
+ "rewards/margins": 0.036555372178554535,
290
+ "rewards/rejected": -0.020401041954755783,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.9960716642946403e-07,
296
+ "logits/chosen": -2.048490524291992,
297
+ "logits/rejected": -2.0457024574279785,
298
+ "logps/chosen": -31.492746353149414,
299
+ "logps/rejected": -31.319293975830078,
300
+ "loss": 1.0022,
301
  "rewards/accuracies": 0.5,
302
+ "rewards/chosen": -0.003033895045518875,
303
+ "rewards/margins": -0.002170243766158819,
304
+ "rewards/rejected": -0.0008636537822894752,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.771853789806683e-07,
310
+ "logits/chosen": -1.9185683727264404,
311
+ "logits/rejected": -1.9232347011566162,
312
+ "logps/chosen": -31.5926513671875,
313
+ "logps/rejected": -32.78697204589844,
314
+ "loss": 0.9923,
315
+ "rewards/accuracies": 0.5874999761581421,
316
+ "rewards/chosen": 0.01090637780725956,
317
+ "rewards/margins": 0.007740379776805639,
318
+ "rewards/rejected": 0.0031659987289458513,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
+ "eval_logits/chosen": -2.243807315826416,
324
+ "eval_logits/rejected": -2.2389235496520996,
325
+ "eval_logps/chosen": -34.018714904785156,
326
+ "eval_logps/rejected": -37.517478942871094,
327
+ "eval_loss": 0.9864435195922852,
328
+ "eval_rewards/accuracies": 0.5328072905540466,
329
+ "eval_rewards/chosen": 0.012671924196183681,
330
+ "eval_rewards/margins": 0.013361074030399323,
331
+ "eval_rewards/rejected": -0.0006891504744999111,
332
+ "eval_runtime": 145.9847,
333
  "eval_samples_per_second": 2.35,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
 
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402e-07,
340
+ "logits/chosen": -2.0318503379821777,
341
+ "logits/rejected": -2.042539596557617,
342
+ "logps/chosen": -31.948400497436523,
343
+ "logps/rejected": -33.86983871459961,
344
+ "loss": 0.976,
345
+ "rewards/accuracies": 0.5,
346
+ "rewards/chosen": 0.018323851749300957,
347
+ "rewards/margins": 0.024035325273871422,
348
+ "rewards/rejected": -0.005711475852876902,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.318564697655179e-07,
354
+ "logits/chosen": -1.9251388311386108,
355
+ "logits/rejected": -1.9399843215942383,
356
+ "logps/chosen": -30.099853515625,
357
+ "logps/rejected": -31.55409812927246,
358
+ "loss": 0.9939,
359
+ "rewards/accuracies": 0.5249999761581421,
360
+ "rewards/chosen": 0.009348717518150806,
361
+ "rewards/margins": 0.006087350193411112,
362
+ "rewards/rejected": 0.003261365694925189,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.093227910899832e-07,
368
+ "logits/chosen": -1.9835479259490967,
369
+ "logits/rejected": -1.9875112771987915,
370
+ "logps/chosen": -33.387638092041016,
371
+ "logps/rejected": -31.554845809936523,
372
+ "loss": 0.9717,
373
+ "rewards/accuracies": 0.550000011920929,
374
+ "rewards/chosen": 0.023866940289735794,
375
+ "rewards/margins": 0.028299665078520775,
376
+ "rewards/rejected": -0.00443272665143013,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279356e-07,
382
+ "logits/chosen": -1.9826898574829102,
383
+ "logits/rejected": -1.9607274532318115,
384
+ "logps/chosen": -34.158443450927734,
385
+ "logps/rejected": -34.963783264160156,
386
+ "loss": 0.9934,
387
+ "rewards/accuracies": 0.512499988079071,
388
+ "rewards/chosen": -0.0037571662105619907,
389
+ "rewards/margins": 0.0066327848471701145,
390
+ "rewards/rejected": -0.01038995012640953,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.654436768970182e-07,
396
+ "logits/chosen": -2.024381160736084,
397
+ "logits/rejected": -2.0210862159729004,
398
+ "logps/chosen": -32.9254035949707,
399
+ "logps/rejected": -36.251712799072266,
400
+ "loss": 0.9788,
401
+ "rewards/accuracies": 0.5375000238418579,
402
+ "rewards/chosen": -0.0002662956831045449,
403
+ "rewards/margins": 0.02116318605840206,
404
+ "rewards/rejected": -0.021429482847452164,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.444597403062196e-07,
410
+ "logits/chosen": -1.8911311626434326,
411
+ "logits/rejected": -1.8886839151382446,
412
+ "logps/chosen": -34.194557189941406,
413
+ "logps/rejected": -35.51445770263672,
414
+ "loss": 0.9971,
415
+ "rewards/accuracies": 0.48750001192092896,
416
+ "rewards/chosen": -0.004926486872136593,
417
+ "rewards/margins": 0.002939151134341955,
418
+ "rewards/rejected": -0.007865638472139835,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.2434529917578887e-07,
424
+ "logits/chosen": -1.8759450912475586,
425
+ "logits/rejected": -1.8734045028686523,
426
+ "logps/chosen": -34.40558624267578,
427
+ "logps/rejected": -31.752349853515625,
428
+ "loss": 1.0216,
429
+ "rewards/accuracies": 0.4625000059604645,
430
+ "rewards/chosen": -0.0063878437504172325,
431
+ "rewards/margins": -0.02157408930361271,
432
+ "rewards/rejected": 0.015186244621872902,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603521e-07,
438
+ "logits/chosen": -1.980015754699707,
439
+ "logits/rejected": -1.9693737030029297,
440
+ "logps/chosen": -35.33230209350586,
441
+ "logps/rejected": -31.845691680908203,
442
+ "loss": 0.9768,
443
+ "rewards/accuracies": 0.512499988079071,
444
+ "rewards/chosen": 0.019471382722258568,
445
+ "rewards/margins": 0.023230455815792084,
446
+ "rewards/rejected": -0.0037590693682432175,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071453e-08,
452
+ "logits/chosen": -2.0756278038024902,
453
+ "logits/rejected": -2.060606002807617,
454
+ "logps/chosen": -30.907390594482422,
455
+ "logps/rejected": -32.64055252075195,
456
+ "loss": 0.9956,
457
+ "rewards/accuracies": 0.550000011920929,
458
+ "rewards/chosen": 0.01338677667081356,
459
+ "rewards/margins": 0.0043600378558039665,
460
+ "rewards/rejected": 0.00902673788368702,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-08,
466
+ "logits/chosen": -1.946616768836975,
467
+ "logits/rejected": -1.9440828561782837,
468
+ "logps/chosen": -32.894561767578125,
469
+ "logps/rejected": -30.812387466430664,
470
+ "loss": 0.9775,
471
+ "rewards/accuracies": 0.5,
472
+ "rewards/chosen": 0.026041675359010696,
473
+ "rewards/margins": 0.022502180188894272,
474
+ "rewards/rejected": 0.003539492143318057,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
+ "eval_logits/chosen": -2.2434821128845215,
480
+ "eval_logits/rejected": -2.2386035919189453,
481
+ "eval_logps/chosen": -34.017669677734375,
482
+ "eval_logps/rejected": -37.50018310546875,
483
+ "eval_loss": 0.9997690916061401,
484
+ "eval_rewards/accuracies": 0.490448534488678,
485
+ "eval_rewards/chosen": 0.013507848605513573,
486
+ "eval_rewards/margins": 0.000357679498847574,
487
+ "eval_rewards/rejected": 0.0131501704454422,
488
+ "eval_runtime": 145.7344,
489
+ "eval_samples_per_second": 2.354,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589034e-08,
496
+ "logits/chosen": -1.9287067651748657,
497
+ "logits/rejected": -1.925451636314392,
498
+ "logps/chosen": -31.603496551513672,
499
+ "logps/rejected": -33.734046936035156,
500
+ "loss": 1.0066,
501
+ "rewards/accuracies": 0.4749999940395355,
502
+ "rewards/chosen": -0.00011934786743950099,
503
+ "rewards/margins": -0.006604696158319712,
504
+ "rewards/rejected": 0.006485348101705313,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380912e-08,
510
+ "logits/chosen": -1.9802377223968506,
511
+ "logits/rejected": -1.9679291248321533,
512
+ "logps/chosen": -34.585323333740234,
513
+ "logps/rejected": -33.57084274291992,
514
+ "loss": 0.9658,
515
  "rewards/accuracies": 0.5375000238418579,
516
+ "rewards/chosen": 0.00860034953802824,
517
+ "rewards/margins": 0.034203141927719116,
518
+ "rewards/rejected": -0.02560279332101345,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-08,
524
+ "logits/chosen": -2.015906810760498,
525
+ "logits/rejected": -2.014427661895752,
526
+ "logps/chosen": -33.49116516113281,
527
+ "logps/rejected": -32.47978973388672,
528
+ "loss": 1.0041,
529
+ "rewards/accuracies": 0.4375,
530
+ "rewards/chosen": -0.0042965407483279705,
531
+ "rewards/margins": -0.004129170440137386,
532
+ "rewards/rejected": -0.00016737030819058418,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.05793773749158e-08,
538
+ "logits/chosen": -2.1030631065368652,
539
+ "logits/rejected": -2.0872654914855957,
540
+ "logps/chosen": -34.18492889404297,
541
+ "logps/rejected": -33.08319854736328,
542
+ "loss": 1.0323,
543
+ "rewards/accuracies": 0.42500001192092896,
544
+ "rewards/chosen": -0.00442854966968298,
545
+ "rewards/margins": -0.03229869529604912,
546
+ "rewards/rejected": 0.027870142832398415,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.251801807404168e-08,
552
+ "logits/chosen": -1.9745471477508545,
553
+ "logits/rejected": -1.9736032485961914,
554
+ "logps/chosen": -33.23271942138672,
555
+ "logps/rejected": -32.4765510559082,
556
+ "loss": 0.9514,
557
+ "rewards/accuracies": 0.612500011920929,
558
+ "rewards/chosen": 0.0395023450255394,
559
+ "rewards/margins": 0.04862459748983383,
560
+ "rewards/rejected": -0.009122253395617008,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-09,
566
+ "logits/chosen": -1.9305438995361328,
567
+ "logits/rejected": -1.940913438796997,
568
+ "logps/chosen": -32.22040939331055,
569
+ "logps/rejected": -35.28728103637695,
570
+ "loss": 1.0138,
571
+ "rewards/accuracies": 0.48750001192092896,
572
+ "rewards/chosen": -0.012576865963637829,
573
+ "rewards/margins": -0.013758530840277672,
574
+ "rewards/rejected": 0.0011816651094704866,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050324e-09,
580
+ "logits/chosen": -2.069648265838623,
581
+ "logits/rejected": -2.0630898475646973,
582
+ "logps/chosen": -33.63695526123047,
583
+ "logps/rejected": -29.226470947265625,
584
+ "loss": 0.988,
585
+ "rewards/accuracies": 0.5625,
586
+ "rewards/chosen": 0.012009462341666222,
587
+ "rewards/margins": 0.012046033516526222,
588
+ "rewards/rejected": -3.6573597753886133e-05,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-10,
594
+ "logits/chosen": -1.928865671157837,
595
+ "logits/rejected": -1.9310123920440674,
596
+ "logps/chosen": -34.243560791015625,
597
+ "logps/rejected": -30.892742156982422,
598
+ "loss": 0.9875,
599
+ "rewards/accuracies": 0.5249999761581421,
600
+ "rewards/chosen": -0.003606534795835614,
601
+ "rewards/margins": 0.012486499734222889,
602
+ "rewards/rejected": -0.01609303429722786,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
+ "train_loss": 0.21957750196580764,
610
+ "train_runtime": 627.8531,
611
  "train_samples_per_second": 4.904,
612
  "train_steps_per_second": 0.613
613
  }
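The trainer_state.json diff above rewrites essentially every logged value: the new run's per-step loss sits around 1.0 (versus roughly 0.4 before), and every per-step reward statistic is updated. To pull those step-wise metrics back out of the file, the standard transformers trainer-state layout keeps them in a `log_history` list; a small sketch:

```python
# Read per-step training metrics from trainer_state.json (assumes the usual
# transformers Trainer format, where metrics live under "log_history").
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:  # training entries; evaluation entries use "eval_loss" instead
        print(entry["step"], entry["loss"], entry.get("rewards/margins"))
```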
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5dcdc999fe444f3765c1759b69b7b761c87318fff78403404a07fca4e2fd2e2f
+ oid sha256:4cff3f16728b8d9571d3e16284fa5d4a5a3f3e9139edda68dfc9a3846a2ca29a
  size 4984
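training_args.bin is tracked with Git LFS, so the diff only shows a new object hash. The file itself is the pickled `TrainingArguments` used for the run; a hedged sketch for inspecting it locally (unpickling requires trusting the file and having transformers installed):

```python
# Inspect the serialized TrainingArguments behind training_args.bin.
import torch

# weights_only needs torch >= 1.13; on torch >= 2.6 it must be set to False explicitly
# because the file is a full pickle, not a plain tensor checkpoint.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
```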