hugodk-sch committed
Commit 2455f45
1 Parent(s): 23fa1a3

Model save

README.md CHANGED
@@ -1,13 +1,11 @@
1
  ---
2
  library_name: peft
3
  tags:
4
- - alignment-handbook
5
  - trl
6
  - dpo
 
7
  - generated_from_trainer
8
  base_model: NbAiLab/nb-gpt-j-6B-v2
9
- datasets:
10
- - hugodk-sch/aftonposten_title_prefs
11
  model-index:
12
  - name: aftonposten-6b-align-scan
13
  results: []
@@ -18,17 +16,17 @@ should probably proofread and complete it, then remove this comment. -->
18
 
19
  # aftonposten-6b-align-scan
20
 
21
- This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
22
  It achieves the following results on the evaluation set:
23
- - Loss: 1.0085
24
- - Rewards/chosen: -0.0006
25
- - Rewards/rejected: 0.0077
26
- - Rewards/accuracies: 0.4817
27
- - Rewards/margins: -0.0083
28
- - Logps/rejected: -37.5037
29
- - Logps/chosen: -34.0355
30
- - Logits/rejected: -2.2391
31
- - Logits/chosen: -2.2440
32
 
33
  ## Model description
34
 
@@ -63,9 +61,9 @@ The following hyperparameters were used during training:
63
 
64
  | Training Loss | Epoch | Step | Logits/chosen | Logits/rejected | Logps/chosen | Logps/rejected | Validation Loss | Rewards/accuracies | Rewards/chosen | Rewards/margins | Rewards/rejected |
65
  |:-------------:|:-----:|:----:|:-------------:|:---------------:|:------------:|:--------------:|:---------------:|:------------------:|:--------------:|:---------------:|:----------------:|
66
- | 1.0007 | 0.26 | 100 | -2.2440 | -2.2391 | -34.0192 | -37.5035 | 0.9989 | 0.5403 | 0.0092 | 0.0013 | 0.0079 |
67
- | 0.9898 | 0.52 | 200 | -2.2440 | -2.2391 | -34.0138 | -37.5009 | 0.9970 | 0.5307 | 0.0124 | 0.0030 | 0.0094 |
68
- | 0.9846 | 0.78 | 300 | -2.2437 | -2.2388 | -34.0263 | -37.5052 | 1.0020 | 0.4635 | 0.0050 | -0.0019 | 0.0068 |
69
 
70
 
71
  ### Framework versions
 
1
  ---
2
  library_name: peft
3
  tags:
 
4
  - trl
5
  - dpo
6
+ - alignment-handbook
7
  - generated_from_trainer
8
  base_model: NbAiLab/nb-gpt-j-6B-v2
 
 
9
  model-index:
10
  - name: aftonposten-6b-align-scan
11
  results: []
 
16
 
17
  # aftonposten-6b-align-scan
18
 
19
+ This model is a fine-tuned version of [NbAiLab/nb-gpt-j-6B-v2](https://huggingface.co/NbAiLab/nb-gpt-j-6B-v2) on an unknown dataset.
20
  It achieves the following results on the evaluation set:
21
+ - Logits/chosen: -2.2441
22
+ - Logits/rejected: -2.2392
23
+ - Logps/chosen: -34.0123
24
+ - Logps/rejected: -37.5091
25
+ - Loss: 0.6902
26
+ - Rewards/accuracies: 0.5133
27
+ - Rewards/chosen: 0.0156
28
+ - Rewards/margins: 0.0103
29
+ - Rewards/rejected: 0.0052
30
 
31
  ## Model description
32
 
 
61
 
62
  | Training Loss | Epoch | Step | Logits/chosen | Logits/rejected | Logps/chosen | Logps/rejected | Validation Loss | Rewards/accuracies | Rewards/chosen | Rewards/margins | Rewards/rejected |
63
  |:-------------:|:-----:|:----:|:-------------:|:---------------:|:------------:|:--------------:|:---------------:|:------------------:|:--------------:|:---------------:|:----------------:|
64
+ | 0.6821 | 0.26 | 100 | -2.2442 | -2.2393 | -34.0258 | -37.4952 | 0.6996 | 0.4805 | 0.0061 | -0.0088 | 0.0150 |
65
+ | 0.6938 | 0.52 | 200 | -2.2438 | -2.2389 | -34.0275 | -37.5027 | 0.6978 | 0.4817 | 0.0050 | -0.0048 | 0.0098 |
66
+ | 0.681 | 0.78 | 300 | -2.2441 | -2.2392 | -34.0123 | -37.5091 | 0.6902 | 0.5133 | 0.0156 | 0.0103 | 0.0052 |
67
 
68
 
69
  ### Framework versions
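
Note: the updated card lists `NbAiLab/nb-gpt-j-6B-v2` as the base model and keeps the `peft`/`trl`/`dpo` tags, so the adapter saved in this commit should be loadable with PEFT's auto classes. A minimal sketch, assuming the adapter is published as `hugodk-sch/aftonposten-6b-align-scan` (inferred from the model-index name and committer; the hub id is not stated in the diff):

```python
# Sketch only: load the DPO-trained LoRA adapter on top of its base model.
# "hugodk-sch/aftonposten-6b-align-scan" is an assumed hub id, not confirmed by this commit.
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

adapter_id = "hugodk-sch/aftonposten-6b-align-scan"  # assumption
model = AutoPeftModelForCausalLM.from_pretrained(adapter_id)        # resolves NbAiLab/nb-gpt-j-6B-v2 + adapter weights
tokenizer = AutoTokenizer.from_pretrained("NbAiLab/nb-gpt-j-6B-v2")

inputs = tokenizer("Oslo:", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=30)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```
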
adapter_config.json CHANGED
@@ -19,13 +19,13 @@
19
  "rank_pattern": {},
20
  "revision": null,
21
  "target_modules": [
 
 
 
22
  "down_proj",
23
- "up_proj",
24
  "k_proj",
25
  "gate_proj",
26
- "o_proj",
27
- "v_proj",
28
- "q_proj"
29
  ],
30
  "task_type": "CAUSAL_LM",
31
  "use_rslora": false
 
19
  "rank_pattern": {},
20
  "revision": null,
21
  "target_modules": [
22
+ "q_proj",
23
+ "o_proj",
24
+ "v_proj",
25
  "down_proj",
 
26
  "k_proj",
27
  "gate_proj",
28
+ "up_proj"
 
 
29
  ],
30
  "task_type": "CAUSAL_LM",
31
  "use_rslora": false
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:51387effbb49cdadf9979c5a56ff61a06766e3483674b168cbccd22db7c79182
3
  size 176183216
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fad833e10db9e8b4254a027fa666bb7c4f87d8c271d8f457b338262c20a03e39
3
  size 176183216
all_results.json CHANGED
@@ -1,21 +1,21 @@
1
  {
2
  "epoch": 1.0,
3
- "eval_logits/chosen": -2.243980884552002,
4
- "eval_logits/rejected": -2.2390992641448975,
5
- "eval_logps/chosen": -34.03550720214844,
6
- "eval_logps/rejected": -37.50373458862305,
7
- "eval_loss": 1.0085219144821167,
8
- "eval_rewards/accuracies": 0.48172760009765625,
9
- "eval_rewards/chosen": -0.0005728387623094022,
10
- "eval_rewards/margins": -0.008305290713906288,
11
- "eval_rewards/rejected": 0.007732452359050512,
12
- "eval_runtime": 146.1229,
13
  "eval_samples": 343,
14
- "eval_samples_per_second": 2.347,
15
- "eval_steps_per_second": 0.294,
16
- "train_loss": 0.21985046337177228,
17
- "train_runtime": 627.6127,
18
  "train_samples": 3079,
19
- "train_samples_per_second": 4.906,
20
  "train_steps_per_second": 0.613
21
  }
 
1
  {
2
  "epoch": 1.0,
3
+ "eval_logits/chosen": -2.2439703941345215,
4
+ "eval_logits/rejected": -2.2390873432159424,
5
+ "eval_logps/chosen": -34.037574768066406,
6
+ "eval_logps/rejected": -37.50224685668945,
7
+ "eval_loss": 0.7015302777290344,
8
+ "eval_rewards/accuracies": 0.4925249218940735,
9
+ "eval_rewards/chosen": -0.002115294337272644,
10
+ "eval_rewards/margins": -0.012174182571470737,
11
+ "eval_rewards/rejected": 0.010058889165520668,
12
+ "eval_runtime": 145.8307,
13
  "eval_samples": 343,
14
+ "eval_samples_per_second": 2.352,
15
+ "eval_steps_per_second": 0.295,
16
+ "train_loss": 0.15254525333255917,
17
+ "train_runtime": 628.1072,
18
  "train_samples": 3079,
19
+ "train_samples_per_second": 4.902,
20
  "train_steps_per_second": 0.613
21
  }
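
Note: the throughput fields in both versions are internally consistent (samples divided by runtime). A quick check against the new values:

```python
# Sanity check of the reported throughput in the updated all_results.json.
eval_samples, eval_runtime = 343, 145.8307
train_samples, train_runtime = 3079, 628.1072

print(round(eval_samples / eval_runtime, 3))    # 2.352 -> eval_samples_per_second
print(round(train_samples / train_runtime, 3))  # 4.902 -> train_samples_per_second
```
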
eval_results.json CHANGED
@@ -1,16 +1,16 @@
1
  {
2
  "epoch": 1.0,
3
- "eval_logits/chosen": -2.243980884552002,
4
- "eval_logits/rejected": -2.2390992641448975,
5
- "eval_logps/chosen": -34.03550720214844,
6
- "eval_logps/rejected": -37.50373458862305,
7
- "eval_loss": 1.0085219144821167,
8
- "eval_rewards/accuracies": 0.48172760009765625,
9
- "eval_rewards/chosen": -0.0005728387623094022,
10
- "eval_rewards/margins": -0.008305290713906288,
11
- "eval_rewards/rejected": 0.007732452359050512,
12
- "eval_runtime": 146.1229,
13
  "eval_samples": 343,
14
- "eval_samples_per_second": 2.347,
15
- "eval_steps_per_second": 0.294
16
  }
 
1
  {
2
  "epoch": 1.0,
3
+ "eval_logits/chosen": -2.2439703941345215,
4
+ "eval_logits/rejected": -2.2390873432159424,
5
+ "eval_logps/chosen": -34.037574768066406,
6
+ "eval_logps/rejected": -37.50224685668945,
7
+ "eval_loss": 0.7015302777290344,
8
+ "eval_rewards/accuracies": 0.4925249218940735,
9
+ "eval_rewards/chosen": -0.002115294337272644,
10
+ "eval_rewards/margins": -0.012174182571470737,
11
+ "eval_rewards/rejected": 0.010058889165520668,
12
+ "eval_runtime": 145.8307,
13
  "eval_samples": 343,
14
+ "eval_samples_per_second": 2.352,
15
+ "eval_steps_per_second": 0.295
16
  }
train_results.json CHANGED
@@ -1,8 +1,8 @@
1
  {
2
  "epoch": 1.0,
3
- "train_loss": 0.21985046337177228,
4
- "train_runtime": 627.6127,
5
  "train_samples": 3079,
6
- "train_samples_per_second": 4.906,
7
  "train_steps_per_second": 0.613
8
  }
 
1
  {
2
  "epoch": 1.0,
3
+ "train_loss": 0.15254525333255917,
4
+ "train_runtime": 628.1072,
5
  "train_samples": 3079,
6
+ "train_samples_per_second": 4.902,
7
  "train_steps_per_second": 0.613
8
  }
trainer_state.json CHANGED
@@ -15,7 +15,7 @@
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
- "loss": 1.0,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
@@ -25,590 +25,590 @@
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.2820512820512818e-07,
28
- "logits/chosen": -1.866068720817566,
29
- "logits/rejected": -1.8703863620758057,
30
- "logps/chosen": -36.99105453491211,
31
- "logps/rejected": -33.65992736816406,
32
- "loss": 0.9728,
33
- "rewards/accuracies": 0.5138888955116272,
34
- "rewards/chosen": 0.009325359016656876,
35
- "rewards/margins": 0.027240369468927383,
36
- "rewards/rejected": -0.017915012314915657,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.5641025641025636e-07,
42
- "logits/chosen": -1.9970887899398804,
43
- "logits/rejected": -1.9997413158416748,
44
- "logps/chosen": -29.63595199584961,
45
- "logps/rejected": -29.070571899414062,
46
- "loss": 0.992,
47
- "rewards/accuracies": 0.5,
48
- "rewards/chosen": 0.003744622226804495,
49
- "rewards/margins": 0.008021654561161995,
50
- "rewards/rejected": -0.004277031868696213,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.8461538461538463e-07,
56
- "logits/chosen": -1.9195963144302368,
57
- "logits/rejected": -1.9168994426727295,
58
- "logps/chosen": -31.414175033569336,
59
- "logps/rejected": -33.25041961669922,
60
- "loss": 0.9797,
61
- "rewards/accuracies": 0.5625,
62
- "rewards/chosen": 0.0011531396303325891,
63
- "rewards/margins": 0.02032935619354248,
64
- "rewards/rejected": -0.019176214933395386,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438433e-07,
70
- "logits/chosen": -2.01688814163208,
71
- "logits/rejected": -2.0081212520599365,
72
- "logps/chosen": -32.583614349365234,
73
- "logps/rejected": -32.48966979980469,
74
- "loss": 1.0179,
75
- "rewards/accuracies": 0.4375,
76
- "rewards/chosen": -0.004085154738277197,
77
- "rewards/margins": -0.017886672168970108,
78
- "rewards/rejected": 0.013801517896354198,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542186e-07,
84
- "logits/chosen": -1.8644500970840454,
85
- "logits/rejected": -1.853655219078064,
86
- "logps/chosen": -33.54804229736328,
87
- "logps/rejected": -35.43767166137695,
88
- "loss": 1.0031,
89
- "rewards/accuracies": 0.4375,
90
- "rewards/chosen": 0.005664288066327572,
91
- "rewards/margins": -0.0031387731432914734,
92
- "rewards/rejected": 0.00880306214094162,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941118e-07,
98
- "logits/chosen": -1.9449504613876343,
99
- "logits/rejected": -1.946890115737915,
100
- "logps/chosen": -32.58049774169922,
101
- "logps/rejected": -33.199378967285156,
102
- "loss": 0.9714,
103
- "rewards/accuracies": 0.612500011920929,
104
- "rewards/chosen": 0.011811850592494011,
105
- "rewards/margins": 0.02860717847943306,
106
- "rewards/rejected": -0.016795331612229347,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413548e-07,
112
- "logits/chosen": -2.0795464515686035,
113
- "logits/rejected": -2.0845184326171875,
114
- "logps/chosen": -33.98798370361328,
115
- "logps/rejected": -36.569732666015625,
116
- "loss": 0.9992,
117
- "rewards/accuracies": 0.48750001192092896,
118
- "rewards/chosen": 0.0003018675488419831,
119
- "rewards/margins": 0.0008206713828258216,
120
- "rewards/rejected": -0.0005188033101148903,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-07,
126
- "logits/chosen": -1.9425585269927979,
127
- "logits/rejected": -1.945704698562622,
128
- "logps/chosen": -34.408721923828125,
129
- "logps/rejected": -34.581504821777344,
130
- "loss": 0.9951,
131
- "rewards/accuracies": 0.5375000238418579,
132
- "rewards/chosen": 0.011452903971076012,
133
- "rewards/margins": 0.0049109989777207375,
134
- "rewards/rejected": 0.006541903130710125,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.736716601303429e-07,
140
- "logits/chosen": -1.9505088329315186,
141
- "logits/rejected": -1.9550243616104126,
142
- "logps/chosen": -32.451255798339844,
143
- "logps/rejected": -32.366790771484375,
144
- "loss": 0.9856,
145
- "rewards/accuracies": 0.512499988079071,
146
- "rewards/chosen": 0.009697502478957176,
147
- "rewards/margins": 0.014425436034798622,
148
- "rewards/rejected": -0.0047279344871640205,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.62624545834521e-07,
154
- "logits/chosen": -2.048947334289551,
155
- "logits/rejected": -2.046935796737671,
156
- "logps/chosen": -32.25363540649414,
157
- "logps/rejected": -31.277917861938477,
158
- "loss": 1.0007,
159
- "rewards/accuracies": 0.44999998807907104,
160
- "rewards/chosen": -0.007680465932935476,
161
- "rewards/margins": -0.0006578800384886563,
162
- "rewards/rejected": -0.007022587116807699,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
- "eval_logits/chosen": -2.243957042694092,
168
- "eval_logits/rejected": -2.2390689849853516,
169
- "eval_logps/chosen": -34.019203186035156,
170
- "eval_logps/rejected": -37.50347900390625,
171
- "eval_loss": 0.998858630657196,
172
- "eval_rewards/accuracies": 0.5402824282646179,
173
- "eval_rewards/chosen": 0.009209612384438515,
174
- "eval_rewards/margins": 0.0013267018366605043,
175
- "eval_rewards/rejected": 0.00788290984928608,
176
- "eval_runtime": 145.9923,
177
- "eval_samples_per_second": 2.349,
178
- "eval_steps_per_second": 0.295,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.4982572012636904e-07,
184
- "logits/chosen": -2.005302667617798,
185
- "logits/rejected": -2.002894878387451,
186
- "logps/chosen": -33.23944854736328,
187
- "logps/rejected": -34.005104064941406,
188
- "loss": 1.0119,
189
- "rewards/accuracies": 0.44999998807907104,
190
- "rewards/chosen": 0.002622986678034067,
191
- "rewards/margins": -0.01194014959037304,
192
- "rewards/rejected": 0.014563137665390968,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777677e-07,
198
- "logits/chosen": -2.0167059898376465,
199
- "logits/rejected": -2.0083398818969727,
200
- "logps/chosen": -32.45713424682617,
201
- "logps/rejected": -32.17597198486328,
202
- "loss": 0.9987,
203
- "rewards/accuracies": 0.44999998807907104,
204
- "rewards/chosen": -0.0073361145332455635,
205
- "rewards/margins": 0.0013455990701913834,
206
- "rewards/rejected": -0.008681714534759521,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.194082707715275e-07,
212
- "logits/chosen": -2.0464558601379395,
213
- "logits/rejected": -2.038412094116211,
214
- "logps/chosen": -30.482311248779297,
215
- "logps/rejected": -32.04933166503906,
216
- "loss": 1.0062,
217
- "rewards/accuracies": 0.48750001192092896,
218
- "rewards/chosen": -0.003956255037337542,
219
- "rewards/margins": -0.006162940990179777,
220
- "rewards/rejected": 0.002206685720011592,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.020402418666621e-07,
226
- "logits/chosen": -1.9769861698150635,
227
- "logits/rejected": -1.9872652292251587,
228
- "logps/chosen": -31.415592193603516,
229
- "logps/rejected": -32.5562858581543,
230
- "loss": 0.9847,
231
- "rewards/accuracies": 0.574999988079071,
232
- "rewards/chosen": 0.0024404728319495916,
233
- "rewards/margins": 0.015285758301615715,
234
- "rewards/rejected": -0.01284528523683548,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.8341962650351185e-07,
240
- "logits/chosen": -1.890750527381897,
241
- "logits/rejected": -1.8918195962905884,
242
- "logps/chosen": -34.196632385253906,
243
- "logps/rejected": -34.74388885498047,
244
- "loss": 1.0104,
245
- "rewards/accuracies": 0.4000000059604645,
246
- "rewards/chosen": -0.002364098560065031,
247
- "rewards/margins": -0.0103833619505167,
248
- "rewards/rejected": 0.008019264787435532,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800572e-07,
254
- "logits/chosen": -1.9424158334732056,
255
- "logits/rejected": -1.9389365911483765,
256
- "logps/chosen": -36.13062286376953,
257
- "logps/rejected": -32.720314025878906,
258
- "loss": 0.9768,
259
- "rewards/accuracies": 0.637499988079071,
260
- "rewards/chosen": 0.02463117241859436,
261
- "rewards/margins": 0.023165332153439522,
262
- "rewards/rejected": 0.0014658428262919188,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.430433172111807e-07,
268
- "logits/chosen": -2.04191517829895,
269
- "logits/rejected": -2.034518003463745,
270
- "logps/chosen": -33.780067443847656,
271
- "logps/rejected": -31.35748863220215,
272
- "loss": 0.9859,
273
- "rewards/accuracies": 0.5375000238418579,
274
- "rewards/chosen": 0.010676576755940914,
275
- "rewards/margins": 0.014060018584132195,
276
- "rewards/rejected": -0.0033834422938525677,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.216202642830543e-07,
282
- "logits/chosen": -2.0474770069122314,
283
- "logits/rejected": -2.0527572631835938,
284
- "logps/chosen": -32.51685333251953,
285
- "logps/rejected": -32.48542022705078,
286
- "loss": 0.9831,
287
- "rewards/accuracies": 0.5,
288
- "rewards/chosen": 0.01676076650619507,
289
- "rewards/margins": 0.016928378492593765,
290
- "rewards/rejected": -0.00016761067672632635,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.9960716642946403e-07,
296
- "logits/chosen": -2.0486884117126465,
297
- "logits/rejected": -2.045897960662842,
298
- "logps/chosen": -31.490026473999023,
299
- "logps/rejected": -31.336299896240234,
300
- "loss": 0.9898,
301
- "rewards/accuracies": 0.5,
302
- "rewards/chosen": -0.0006444025784730911,
303
- "rewards/margins": 0.010206506587564945,
304
- "rewards/rejected": -0.010850909166038036,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.771853789806683e-07,
310
- "logits/chosen": -1.9185329675674438,
311
- "logits/rejected": -1.9231984615325928,
312
- "logps/chosen": -31.579875946044922,
313
- "logps/rejected": -32.781455993652344,
314
- "loss": 0.9898,
315
- "rewards/accuracies": 0.574999988079071,
316
- "rewards/chosen": 0.015846019610762596,
317
- "rewards/margins": 0.010161412879824638,
318
- "rewards/rejected": 0.005684606730937958,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
- "eval_logits/chosen": -2.2439582347869873,
324
- "eval_logits/rejected": -2.239088296890259,
325
- "eval_logps/chosen": -34.01380920410156,
326
- "eval_logps/rejected": -37.500911712646484,
327
- "eval_loss": 0.9969704151153564,
328
- "eval_rewards/accuracies": 0.5307309031486511,
329
- "eval_rewards/chosen": 0.01244510430842638,
330
- "eval_rewards/margins": 0.0030194728169590235,
331
- "eval_rewards/rejected": 0.009425631724298,
332
- "eval_runtime": 145.8589,
333
- "eval_samples_per_second": 2.352,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402e-07,
340
- "logits/chosen": -2.0318081378936768,
341
- "logits/rejected": -2.0424768924713135,
342
- "logps/chosen": -31.95905113220215,
343
- "logps/rejected": -33.87731170654297,
344
- "loss": 0.9839,
345
- "rewards/accuracies": 0.574999988079071,
346
- "rewards/chosen": 0.007351105101406574,
347
- "rewards/margins": 0.01611880213022232,
348
- "rewards/rejected": -0.008767696097493172,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.318564697655179e-07,
354
- "logits/chosen": -1.9253685474395752,
355
- "logits/rejected": -1.940233826637268,
356
- "logps/chosen": -30.08770179748535,
357
- "logps/rejected": -31.577301025390625,
358
- "loss": 0.9742,
359
- "rewards/accuracies": 0.612500011920929,
360
- "rewards/chosen": 0.014303619973361492,
361
- "rewards/margins": 0.025777745991945267,
362
- "rewards/rejected": -0.011474122293293476,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.093227910899832e-07,
368
- "logits/chosen": -1.9832963943481445,
369
- "logits/rejected": -1.9872655868530273,
370
- "logps/chosen": -33.39131546020508,
371
- "logps/rejected": -31.548086166381836,
372
- "loss": 0.985,
373
- "rewards/accuracies": 0.574999988079071,
374
- "rewards/chosen": 0.015694385394454002,
375
- "rewards/margins": 0.014964587986469269,
376
- "rewards/rejected": 0.0007297966512851417,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279356e-07,
382
- "logits/chosen": -1.9828859567642212,
383
- "logits/rejected": -1.9609191417694092,
384
- "logps/chosen": -34.16459655761719,
385
- "logps/rejected": -34.95134735107422,
386
- "loss": 1.0062,
387
- "rewards/accuracies": 0.4124999940395355,
388
- "rewards/chosen": -0.0065087564289569855,
389
- "rewards/margins": -0.006178082898259163,
390
- "rewards/rejected": -0.0003306727739982307,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.654436768970182e-07,
396
- "logits/chosen": -2.0246593952178955,
397
- "logits/rejected": -2.0213539600372314,
398
- "logps/chosen": -32.9222412109375,
399
- "logps/rejected": -36.228416442871094,
400
- "loss": 0.9962,
401
- "rewards/accuracies": 0.4749999940395355,
402
- "rewards/chosen": 0.0016961356159299612,
403
- "rewards/margins": 0.0037924889475107193,
404
- "rewards/rejected": -0.0020963551942259073,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.444597403062196e-07,
410
- "logits/chosen": -1.8918907642364502,
411
- "logits/rejected": -1.8894577026367188,
412
- "logps/chosen": -34.189327239990234,
413
- "logps/rejected": -35.505496978759766,
414
- "loss": 1.0,
415
- "rewards/accuracies": 0.5,
416
- "rewards/chosen": -0.0005569729837588966,
417
- "rewards/margins": -2.999706157424953e-05,
418
- "rewards/rejected": -0.0005269756657071412,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.2434529917578887e-07,
424
- "logits/chosen": -1.8758213520050049,
425
- "logits/rejected": -1.8732786178588867,
426
- "logps/chosen": -34.38149642944336,
427
- "logps/rejected": -31.752059936523438,
428
- "loss": 1.0019,
429
- "rewards/accuracies": 0.550000011920929,
430
- "rewards/chosen": 0.009660609066486359,
431
- "rewards/margins": -0.0019017171580344439,
432
- "rewards/rejected": 0.011562327854335308,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603521e-07,
438
- "logits/chosen": -1.9798027276992798,
439
- "logits/rejected": -1.9691746234893799,
440
- "logps/chosen": -35.328853607177734,
441
- "logps/rejected": -31.868383407592773,
442
- "loss": 0.9669,
443
- "rewards/accuracies": 0.5375000238418579,
444
- "rewards/chosen": 0.016671547666192055,
445
- "rewards/margins": 0.03310702368617058,
446
- "rewards/rejected": -0.016435474157333374,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071453e-08,
452
- "logits/chosen": -2.075648546218872,
453
- "logits/rejected": -2.0605978965759277,
454
- "logps/chosen": -30.926809310913086,
455
- "logps/rejected": -32.621070861816406,
456
- "loss": 1.0201,
457
- "rewards/accuracies": 0.4124999940395355,
458
- "rewards/chosen": -0.0016108205309137702,
459
- "rewards/margins": -0.020070534199476242,
460
- "rewards/rejected": 0.018459713086485863,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-08,
466
- "logits/chosen": -1.9466993808746338,
467
- "logits/rejected": -1.9441711902618408,
468
- "logps/chosen": -32.90770721435547,
469
- "logps/rejected": -30.82305335998535,
470
- "loss": 0.9846,
471
- "rewards/accuracies": 0.5249999761581421,
472
- "rewards/chosen": 0.011643724516034126,
473
- "rewards/margins": 0.015391023829579353,
474
- "rewards/rejected": -0.0037472969852387905,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
- "eval_logits/chosen": -2.243670701980591,
480
- "eval_logits/rejected": -2.2387943267822266,
481
- "eval_logps/chosen": -34.02626419067383,
482
- "eval_logps/rejected": -37.50523376464844,
483
- "eval_loss": 1.001994013786316,
484
- "eval_rewards/accuracies": 0.4634551703929901,
485
- "eval_rewards/chosen": 0.0049731116741895676,
486
- "eval_rewards/margins": -0.0018590801628306508,
487
- "eval_rewards/rejected": 0.006832191254943609,
488
- "eval_runtime": 145.8336,
489
- "eval_samples_per_second": 2.352,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589034e-08,
496
- "logits/chosen": -1.9287983179092407,
497
- "logits/rejected": -1.9255239963531494,
498
- "logps/chosen": -31.573007583618164,
499
- "logps/rejected": -33.742637634277344,
500
- "loss": 0.9815,
501
- "rewards/accuracies": 0.6000000238418579,
502
- "rewards/chosen": 0.018206708133220673,
503
- "rewards/margins": 0.018496429547667503,
504
- "rewards/rejected": -0.0002897246740758419,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380912e-08,
510
- "logits/chosen": -1.9805524349212646,
511
- "logits/rejected": -1.9682422876358032,
512
- "logps/chosen": -34.57670211791992,
513
- "logps/rejected": -33.561363220214844,
514
- "loss": 0.9749,
515
- "rewards/accuracies": 0.574999988079071,
516
- "rewards/chosen": 0.011624744161963463,
517
- "rewards/margins": 0.0251374039798975,
518
- "rewards/rejected": -0.013512656092643738,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-08,
524
- "logits/chosen": -2.016091823577881,
525
- "logits/rejected": -2.0146238803863525,
526
- "logps/chosen": -33.47222137451172,
527
- "logps/rejected": -32.46577835083008,
528
- "loss": 1.0001,
529
- "rewards/accuracies": 0.4749999940395355,
530
- "rewards/chosen": 0.008143280632793903,
531
- "rewards/margins": -0.0001393534184899181,
532
- "rewards/rejected": 0.008282631635665894,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.05793773749158e-08,
538
- "logits/chosen": -2.1032156944274902,
539
- "logits/rejected": -2.0874197483062744,
540
- "logps/chosen": -34.174861907958984,
541
- "logps/rejected": -33.088890075683594,
542
- "loss": 1.0148,
543
- "rewards/accuracies": 0.5249999761581421,
544
- "rewards/chosen": 0.002716648392379284,
545
- "rewards/margins": -0.014771336689591408,
546
- "rewards/rejected": 0.017487986013293266,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.251801807404168e-08,
552
- "logits/chosen": -1.9747329950332642,
553
- "logits/rejected": -1.9737837314605713,
554
- "logps/chosen": -33.25817108154297,
555
- "logps/rejected": -32.459861755371094,
556
- "loss": 0.9888,
557
- "rewards/accuracies": 0.44999998807907104,
558
- "rewards/chosen": 0.014356844127178192,
559
- "rewards/margins": 0.01118617132306099,
560
- "rewards/rejected": 0.003170671407133341,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-09,
566
- "logits/chosen": -1.9303538799285889,
567
- "logits/rejected": -1.94071364402771,
568
- "logps/chosen": -32.20769500732422,
569
- "logps/rejected": -35.273780822753906,
570
- "loss": 1.0108,
571
- "rewards/accuracies": 0.44999998807907104,
572
- "rewards/chosen": -0.0018019669223576784,
573
- "rewards/margins": -0.010786842554807663,
574
- "rewards/rejected": 0.008984875865280628,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050324e-09,
580
- "logits/chosen": -2.069509744644165,
581
- "logits/rejected": -2.0629658699035645,
582
- "logps/chosen": -33.647117614746094,
583
- "logps/rejected": -29.217060089111328,
584
- "loss": 1.0027,
585
- "rewards/accuracies": 0.48750001192092896,
586
- "rewards/chosen": 0.002911838237196207,
587
- "rewards/margins": -0.0027081891894340515,
588
- "rewards/rejected": 0.005620025563985109,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-10,
594
- "logits/chosen": -1.929449439048767,
595
- "logits/rejected": -1.9315989017486572,
596
- "logps/chosen": -34.25099563598633,
597
- "logps/rejected": -30.906116485595703,
598
- "loss": 0.9871,
599
- "rewards/accuracies": 0.5249999761581421,
600
- "rewards/chosen": -0.007165629416704178,
601
- "rewards/margins": 0.012927901931107044,
602
- "rewards/rejected": -0.020093530416488647,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
- "train_loss": 0.21985046337177228,
610
- "train_runtime": 627.6127,
611
- "train_samples_per_second": 4.906,
612
  "train_steps_per_second": 0.613
613
  }
614
  ],
 
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
+ "loss": 0.6931,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
 
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.2820512820512818e-07,
28
+ "logits/chosen": -1.866014003753662,
29
+ "logits/rejected": -1.8703253269195557,
30
+ "logps/chosen": -36.98957061767578,
31
+ "logps/rejected": -33.655086517333984,
32
+ "loss": 0.6802,
33
+ "rewards/accuracies": 0.5555555820465088,
34
+ "rewards/chosen": 0.01192101463675499,
35
+ "rewards/margins": 0.029430679976940155,
36
+ "rewards/rejected": -0.017509667202830315,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.5641025641025636e-07,
42
+ "logits/chosen": -1.9975181818008423,
43
+ "logits/rejected": -2.000157356262207,
44
+ "logps/chosen": -29.643035888671875,
45
+ "logps/rejected": -29.068897247314453,
46
+ "loss": 0.6931,
47
+ "rewards/accuracies": 0.4625000059604645,
48
+ "rewards/chosen": -0.0005901128170080483,
49
+ "rewards/margins": 0.003226220142096281,
50
+ "rewards/rejected": -0.003816334530711174,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.8461538461538463e-07,
56
+ "logits/chosen": -1.919750452041626,
57
+ "logits/rejected": -1.9170730113983154,
58
+ "logps/chosen": -31.426767349243164,
59
+ "logps/rejected": -33.24191665649414,
60
+ "loss": 0.6908,
61
+ "rewards/accuracies": 0.5874999761581421,
62
+ "rewards/chosen": -0.00747058168053627,
63
+ "rewards/margins": 0.008947307243943214,
64
+ "rewards/rejected": -0.016417888924479485,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438433e-07,
70
+ "logits/chosen": -2.0170185565948486,
71
+ "logits/rejected": -2.008249282836914,
72
+ "logps/chosen": -32.58369827270508,
73
+ "logps/rejected": -32.508426666259766,
74
+ "loss": 0.6989,
75
+ "rewards/accuracies": 0.48750001192092896,
76
+ "rewards/chosen": -0.004824736155569553,
77
+ "rewards/margins": -0.007793365977704525,
78
+ "rewards/rejected": 0.0029686305206269026,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542186e-07,
84
+ "logits/chosen": -1.864098310470581,
85
+ "logits/rejected": -1.8533239364624023,
86
+ "logps/chosen": -33.5538444519043,
87
+ "logps/rejected": -35.4467658996582,
88
+ "loss": 0.6956,
89
+ "rewards/accuracies": 0.4625000059604645,
90
+ "rewards/chosen": 0.002545009134337306,
91
+ "rewards/margins": -0.0013612977927550673,
92
+ "rewards/rejected": 0.003906308673322201,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941118e-07,
98
+ "logits/chosen": -1.9450995922088623,
99
+ "logits/rejected": -1.9470382928848267,
100
+ "logps/chosen": -32.56639862060547,
101
+ "logps/rejected": -33.19929122924805,
102
+ "loss": 0.6743,
103
+ "rewards/accuracies": 0.550000011920929,
104
+ "rewards/chosen": 0.02365189418196678,
105
+ "rewards/margins": 0.043189071118831635,
106
+ "rewards/rejected": -0.019537178799510002,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413548e-07,
112
+ "logits/chosen": -2.0796637535095215,
113
+ "logits/rejected": -2.084643602371216,
114
+ "logps/chosen": -33.98316955566406,
115
+ "logps/rejected": -36.59341049194336,
116
+ "loss": 0.686,
117
+ "rewards/accuracies": 0.5375000238418579,
118
+ "rewards/chosen": 0.00371909374371171,
119
+ "rewards/margins": 0.020896537229418755,
120
+ "rewards/rejected": -0.01717744767665863,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-07,
126
+ "logits/chosen": -1.9420992136001587,
127
+ "logits/rejected": -1.9452590942382812,
128
+ "logps/chosen": -34.40605545043945,
129
+ "logps/rejected": -34.58306884765625,
130
+ "loss": 0.6914,
131
+ "rewards/accuracies": 0.512499988079071,
132
+ "rewards/chosen": 0.015227687545120716,
133
+ "rewards/margins": 0.008690384216606617,
134
+ "rewards/rejected": 0.006537304259836674,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.736716601303429e-07,
140
+ "logits/chosen": -1.950547456741333,
141
+ "logits/rejected": -1.9550635814666748,
142
+ "logps/chosen": -32.4520378112793,
143
+ "logps/rejected": -32.369686126708984,
144
+ "loss": 0.6861,
145
+ "rewards/accuracies": 0.5625,
146
+ "rewards/chosen": 0.010769412852823734,
147
+ "rewards/margins": 0.018315035849809647,
148
+ "rewards/rejected": -0.007545621134340763,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.62624545834521e-07,
154
+ "logits/chosen": -2.0487563610076904,
155
+ "logits/rejected": -2.046765089035034,
156
+ "logps/chosen": -32.21302032470703,
157
+ "logps/rejected": -31.275714874267578,
158
+ "loss": 0.6821,
159
+ "rewards/accuracies": 0.574999988079071,
160
+ "rewards/chosen": 0.019469190388917923,
161
+ "rewards/margins": 0.02612192928791046,
162
+ "rewards/rejected": -0.006652742624282837,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
+ "eval_logits/chosen": -2.244154930114746,
168
+ "eval_logits/rejected": -2.2392725944519043,
169
+ "eval_logps/chosen": -34.02579116821289,
170
+ "eval_logps/rejected": -37.495243072509766,
171
+ "eval_loss": 0.6996207237243652,
172
+ "eval_rewards/accuracies": 0.48048171401023865,
173
+ "eval_rewards/chosen": 0.006134189199656248,
174
+ "eval_rewards/margins": -0.008827367797493935,
175
+ "eval_rewards/rejected": 0.014961558394134045,
176
+ "eval_runtime": 146.1618,
177
+ "eval_samples_per_second": 2.347,
178
+ "eval_steps_per_second": 0.294,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.4982572012636904e-07,
184
+ "logits/chosen": -2.005396604537964,
185
+ "logits/rejected": -2.00298810005188,
186
+ "logps/chosen": -33.235660552978516,
187
+ "logps/rejected": -34.023067474365234,
188
+ "loss": 0.6946,
189
+ "rewards/accuracies": 0.4749999940395355,
190
+ "rewards/chosen": 0.0057089305482804775,
191
+ "rewards/margins": 0.001296796603128314,
192
+ "rewards/rejected": 0.0044121332466602325,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777677e-07,
198
+ "logits/chosen": -2.0165247917175293,
199
+ "logits/rejected": -2.0081515312194824,
200
+ "logps/chosen": -32.45998764038086,
201
+ "logps/rejected": -32.18000793457031,
202
+ "loss": 0.6936,
203
+ "rewards/accuracies": 0.550000011920929,
204
+ "rewards/chosen": -0.010559305548667908,
205
+ "rewards/margins": 0.0023918983060866594,
206
+ "rewards/rejected": -0.012951202690601349,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.194082707715275e-07,
212
+ "logits/chosen": -2.0464999675750732,
213
+ "logits/rejected": -2.0384459495544434,
214
+ "logps/chosen": -30.482324600219727,
215
+ "logps/rejected": -32.024261474609375,
216
+ "loss": 0.7073,
217
+ "rewards/accuracies": 0.36250001192092896,
218
+ "rewards/chosen": -0.004625456873327494,
219
+ "rewards/margins": -0.02474728412926197,
220
+ "rewards/rejected": 0.020121825858950615,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.020402418666621e-07,
226
+ "logits/chosen": -1.976978063583374,
227
+ "logits/rejected": -1.9872522354125977,
228
+ "logps/chosen": -31.402069091796875,
229
+ "logps/rejected": -32.54044723510742,
230
+ "loss": 0.6869,
231
+ "rewards/accuracies": 0.550000011920929,
232
+ "rewards/chosen": 0.012314395979046822,
233
+ "rewards/margins": 0.01621088758111,
234
+ "rewards/rejected": -0.0038964927662163973,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.8341962650351185e-07,
240
+ "logits/chosen": -1.8904860019683838,
241
+ "logits/rejected": -1.8915693759918213,
242
+ "logps/chosen": -34.20112609863281,
243
+ "logps/rejected": -34.765201568603516,
244
+ "loss": 0.6961,
245
+ "rewards/accuracies": 0.44999998807907104,
246
+ "rewards/chosen": -0.005904030986130238,
247
+ "rewards/margins": -0.0003432026132941246,
248
+ "rewards/rejected": -0.005560829304158688,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800572e-07,
254
+ "logits/chosen": -1.9423534870147705,
255
+ "logits/rejected": -1.938856840133667,
256
+ "logps/chosen": -36.14204025268555,
257
+ "logps/rejected": -32.7347412109375,
258
+ "loss": 0.6817,
259
+ "rewards/accuracies": 0.5625,
260
+ "rewards/chosen": 0.020746376365423203,
261
+ "rewards/margins": 0.029139723628759384,
262
+ "rewards/rejected": -0.008393346332013607,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.430433172111807e-07,
268
+ "logits/chosen": -2.0420148372650146,
269
+ "logits/rejected": -2.034619092941284,
270
+ "logps/chosen": -33.79521942138672,
271
+ "logps/rejected": -31.3667049407959,
272
+ "loss": 0.6897,
273
+ "rewards/accuracies": 0.5625,
274
+ "rewards/chosen": 0.0018501380691304803,
275
+ "rewards/margins": 0.012249214574694633,
276
+ "rewards/rejected": -0.010399078950285912,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.216202642830543e-07,
282
+ "logits/chosen": -2.0474741458892822,
283
+ "logits/rejected": -2.052746295928955,
284
+ "logps/chosen": -32.533199310302734,
285
+ "logps/rejected": -32.49622344970703,
286
+ "loss": 0.6872,
287
+ "rewards/accuracies": 0.48750001192092896,
288
+ "rewards/chosen": 0.008109848015010357,
289
+ "rewards/margins": 0.015868691727519035,
290
+ "rewards/rejected": -0.007758840918540955,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.9960716642946403e-07,
296
+ "logits/chosen": -2.048755407333374,
297
+ "logits/rejected": -2.045989513397217,
298
+ "logps/chosen": -31.495113372802734,
299
+ "logps/rejected": -31.329687118530273,
300
+ "loss": 0.6936,
301
+ "rewards/accuracies": 0.550000011920929,
302
+ "rewards/chosen": -0.004311963450163603,
303
+ "rewards/margins": 0.003718080697581172,
304
+ "rewards/rejected": -0.008030043914914131,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.771853789806683e-07,
310
+ "logits/chosen": -1.9183790683746338,
311
+ "logits/rejected": -1.9230560064315796,
312
+ "logps/chosen": -31.61056137084961,
313
+ "logps/rejected": -32.79776382446289,
314
+ "loss": 0.6938,
315
+ "rewards/accuracies": 0.48750001192092896,
316
+ "rewards/chosen": -0.002994548762217164,
317
+ "rewards/margins": 0.001788289868272841,
318
+ "rewards/rejected": -0.004782836884260178,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
+ "eval_logits/chosen": -2.2438013553619385,
324
+ "eval_logits/rejected": -2.2389278411865234,
325
+ "eval_logps/chosen": -34.02748107910156,
326
+ "eval_logps/rejected": -37.502662658691406,
327
+ "eval_loss": 0.6977797150611877,
328
+ "eval_rewards/accuracies": 0.48172760009765625,
329
+ "eval_rewards/chosen": 0.004952840972691774,
330
+ "eval_rewards/margins": -0.004817771725356579,
331
+ "eval_rewards/rejected": 0.009770614095032215,
332
+ "eval_runtime": 146.0018,
333
+ "eval_samples_per_second": 2.349,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402e-07,
340
+ "logits/chosen": -2.0318925380706787,
341
+ "logits/rejected": -2.0425820350646973,
342
+ "logps/chosen": -31.96123695373535,
343
+ "logps/rejected": -33.87626647949219,
344
+ "loss": 0.6874,
345
+ "rewards/accuracies": 0.5249999761581421,
346
+ "rewards/chosen": 0.007046692073345184,
347
+ "rewards/margins": 0.01654236949980259,
348
+ "rewards/rejected": -0.009495675563812256,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.318564697655179e-07,
354
+ "logits/chosen": -1.9252264499664307,
355
+ "logits/rejected": -1.940086007118225,
356
+ "logps/chosen": -30.087350845336914,
357
+ "logps/rejected": -31.552490234375,
358
+ "loss": 0.6894,
359
+ "rewards/accuracies": 0.4749999940395355,
360
+ "rewards/chosen": 0.01693141832947731,
361
+ "rewards/margins": 0.012951460666954517,
362
+ "rewards/rejected": 0.003979954868555069,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.093227910899832e-07,
368
+ "logits/chosen": -1.9830553531646729,
369
+ "logits/rejected": -1.9870340824127197,
370
+ "logps/chosen": -33.37425231933594,
371
+ "logps/rejected": -31.55866050720215,
372
+ "loss": 0.6771,
373
+ "rewards/accuracies": 0.5375000238418579,
374
+ "rewards/chosen": 0.030249694362282753,
375
+ "rewards/margins": 0.036800943315029144,
376
+ "rewards/rejected": -0.006551248021423817,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279356e-07,
382
+ "logits/chosen": -1.9825870990753174,
383
+ "logits/rejected": -1.9606201648712158,
384
+ "logps/chosen": -34.172298431396484,
385
+ "logps/rejected": -34.95369338989258,
386
+ "loss": 0.7012,
387
+ "rewards/accuracies": 0.4000000059604645,
388
+ "rewards/chosen": -0.012983955442905426,
389
+ "rewards/margins": -0.010954022407531738,
390
+ "rewards/rejected": -0.002029931638389826,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.654436768970182e-07,
396
+ "logits/chosen": -2.0240674018859863,
397
+ "logits/rejected": -2.020759105682373,
398
+ "logps/chosen": -32.91376495361328,
399
+ "logps/rejected": -36.22763442993164,
400
+ "loss": 0.6907,
401
+ "rewards/accuracies": 0.512499988079071,
402
+ "rewards/chosen": 0.007910441607236862,
403
+ "rewards/margins": 0.009807790629565716,
404
+ "rewards/rejected": -0.0018973512342199683,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.444597403062196e-07,
410
+ "logits/chosen": -1.891227126121521,
411
+ "logits/rejected": -1.8887770175933838,
412
+ "logps/chosen": -34.196754455566406,
413
+ "logps/rejected": -35.51189422607422,
414
+ "loss": 0.6959,
415
+ "rewards/accuracies": 0.5625,
416
+ "rewards/chosen": -0.005844797007739544,
417
+ "rewards/margins": -0.0007547728600911796,
418
+ "rewards/rejected": -0.005090024787932634,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.2434529917578887e-07,
424
+ "logits/chosen": -1.875758409500122,
425
+ "logits/rejected": -1.8732105493545532,
426
+ "logps/chosen": -34.38095474243164,
427
+ "logps/rejected": -31.74618148803711,
428
+ "loss": 0.6979,
429
+ "rewards/accuracies": 0.44999998807907104,
430
+ "rewards/chosen": 0.011651447974145412,
431
+ "rewards/margins": -0.005950801074504852,
432
+ "rewards/rejected": 0.01760224997997284,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603521e-07,
438
+ "logits/chosen": -1.9794175624847412,
439
+ "logits/rejected": -1.968788504600525,
440
+ "logps/chosen": -35.32674026489258,
441
+ "logps/rejected": -31.84674072265625,
442
+ "loss": 0.6832,
443
+ "rewards/accuracies": 0.512499988079071,
444
+ "rewards/chosen": 0.020933207124471664,
445
+ "rewards/margins": 0.024957498535513878,
446
+ "rewards/rejected": -0.004024294205009937,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071453e-08,
452
+ "logits/chosen": -2.075899600982666,
453
+ "logits/rejected": -2.0608580112457275,
454
+ "logps/chosen": -30.886249542236328,
455
+ "logps/rejected": -32.66474533081055,
456
+ "loss": 0.6773,
457
+ "rewards/accuracies": 0.5625,
458
+ "rewards/chosen": 0.026513904333114624,
459
+ "rewards/margins": 0.03555149585008621,
460
+ "rewards/rejected": -0.009037593379616737,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-08,
466
+ "logits/chosen": -1.9468847513198853,
467
+ "logits/rejected": -1.9443557262420654,
468
+ "logps/chosen": -32.885986328125,
469
+ "logps/rejected": -30.816707611083984,
470
+ "loss": 0.681,
471
+ "rewards/accuracies": 0.5,
472
+ "rewards/chosen": 0.02878658100962639,
473
+ "rewards/margins": 0.028715472668409348,
474
+ "rewards/rejected": 7.110489241313189e-05,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
+ "eval_logits/chosen": -2.244056224822998,
480
+ "eval_logits/rejected": -2.2391812801361084,
481
+ "eval_logps/chosen": -34.01231384277344,
482
+ "eval_logps/rejected": -37.509132385253906,
483
+ "eval_loss": 0.6901772618293762,
484
+ "eval_rewards/accuracies": 0.5132890343666077,
485
+ "eval_rewards/chosen": 0.015566298738121986,
486
+ "eval_rewards/margins": 0.01032568421214819,
487
+ "eval_rewards/rejected": 0.005240614525973797,
488
+ "eval_runtime": 145.9901,
489
+ "eval_samples_per_second": 2.349,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589034e-08,
496
+ "logits/chosen": -1.9292223453521729,
497
+ "logits/rejected": -1.9259742498397827,
498
+ "logps/chosen": -31.571924209594727,
499
+ "logps/rejected": -33.74208450317383,
500
+ "loss": 0.6842,
501
+ "rewards/accuracies": 0.5625,
502
+ "rewards/chosen": 0.021998558193445206,
503
+ "rewards/margins": 0.02195030450820923,
504
+ "rewards/rejected": 4.825703945243731e-05,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380912e-08,
510
+ "logits/chosen": -1.980800986289978,
511
+ "logits/rejected": -1.9684827327728271,
512
+ "logps/chosen": -34.57365417480469,
513
+ "logps/rejected": -33.5623779296875,
514
+ "loss": 0.6796,
515
+ "rewards/accuracies": 0.5874999761581421,
516
+ "rewards/chosen": 0.015695111826062202,
517
+ "rewards/margins": 0.032171379774808884,
518
+ "rewards/rejected": -0.01647626794874668,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-08,
524
+ "logits/chosen": -2.016021490097046,
525
+ "logits/rejected": -2.0145699977874756,
526
+ "logps/chosen": -33.47764205932617,
527
+ "logps/rejected": -32.47804260253906,
528
+ "loss": 0.6932,
529
+ "rewards/accuracies": 0.512499988079071,
530
+ "rewards/chosen": 0.005706763360649347,
531
+ "rewards/margins": 0.00462835980579257,
532
+ "rewards/rejected": 0.0010784054175019264,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.05793773749158e-08,
538
+ "logits/chosen": -2.1033365726470947,
539
+ "logits/rejected": -2.087541103363037,
540
+ "logps/chosen": -34.160888671875,
541
+ "logps/rejected": -33.08522415161133,
542
+ "loss": 0.7012,
543
+ "rewards/accuracies": 0.48750001192092896,
544
+ "rewards/chosen": 0.012950187548995018,
545
+ "rewards/margins": -0.010019468143582344,
546
+ "rewards/rejected": 0.022969653829932213,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.251801807404168e-08,
552
+ "logits/chosen": -1.974867582321167,
553
+ "logits/rejected": -1.9739230871200562,
554
+ "logps/chosen": -33.23981857299805,
555
+ "logps/rejected": -32.456180572509766,
556
+ "loss": 0.6846,
557
+ "rewards/accuracies": 0.5249999761581421,
558
+ "rewards/chosen": 0.029593482613563538,
559
+ "rewards/margins": 0.023315051570534706,
560
+ "rewards/rejected": 0.006278430111706257,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-09,
566
+ "logits/chosen": -1.9302256107330322,
567
+ "logits/rejected": -1.940580129623413,
568
+ "logps/chosen": -32.2147102355957,
569
+ "logps/rejected": -35.29616165161133,
570
+ "loss": 0.6963,
571
+ "rewards/accuracies": 0.5375000238418579,
572
+ "rewards/chosen": -0.007009805645793676,
573
+ "rewards/margins": -0.0018279661890119314,
574
+ "rewards/rejected": -0.005181840155273676,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050324e-09,
580
+ "logits/chosen": -2.069504737854004,
581
+ "logits/rejected": -2.062945604324341,
582
+ "logps/chosen": -33.646629333496094,
583
+ "logps/rejected": -29.217721939086914,
584
+ "loss": 0.6968,
585
+ "rewards/accuracies": 0.4749999940395355,
586
+ "rewards/chosen": 0.003734806552529335,
587
+ "rewards/margins": -0.0023581120185554028,
588
+ "rewards/rejected": 0.0060929167084395885,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-10,
594
+ "logits/chosen": -1.9288103580474854,
595
+ "logits/rejected": -1.9309613704681396,
596
+ "logps/chosen": -34.23542022705078,
597
+ "logps/rejected": -30.89495277404785,
598
+ "loss": 0.6866,
599
+ "rewards/accuracies": 0.5625,
600
+ "rewards/chosen": 0.0025402153842151165,
601
+ "rewards/margins": 0.018168382346630096,
602
+ "rewards/rejected": -0.015628164634108543,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
+ "train_loss": 0.15254525333255917,
610
+ "train_runtime": 628.1072,
611
+ "train_samples_per_second": 4.902,
612
  "train_steps_per_second": 0.613
613
  }
614
  ],
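
Note: the first logged step in the updated trainer_state.json reports `"loss": 0.6931` with all reward terms at zero. Assuming trl's default sigmoid DPO objective (not confirmed by this diff), that is exactly what a zero reward margin gives, since the per-example loss is `-log σ(margin)` and `-log σ(0) = log 2 ≈ 0.6931`. A small check:

```python
# Sketch (assumes the default sigmoid DPO loss): at zero reward margin the
# per-example loss is -log(sigmoid(0)) = log(2), matching the first logged value.
import math

def sigmoid_dpo_loss(chosen_reward: float, rejected_reward: float) -> float:
    """-log(sigmoid(margin)), where margin is the (already beta-scaled) reward difference."""
    margin = chosen_reward - rejected_reward
    return -math.log(1.0 / (1.0 + math.exp(-margin)))

print(sigmoid_dpo_loss(0.0, 0.0))  # 0.6931471805599453
```
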
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:1a26fc54db51500ff032def9cd7c93907331800443ff434cbd6342d49b66b060
3
  size 4984
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:132c7af23e7be6513ca04c409bb78d8e95ded32cd1a07d465fdf23eb192ce28e
3
  size 4984