hugodk-sch committed on
Commit 8b3c3e0
1 Parent(s): be9daab

Model save

README.md CHANGED
@@ -1,13 +1,11 @@
 ---
 library_name: peft
 tags:
-- alignment-handbook
 - trl
 - dpo
+- alignment-handbook
 - generated_from_trainer
 base_model: NbAiLab/nb-gpt-j-6B-v2
-datasets:
-- hugodk-sch/aftonposten_title_prefs
 model-index:
 - name: aftonposten-6b-align-scan
   results: []
@@ -18,17 +16,17 @@ should probably proofread and complete it, then remove this comment. -->
 
 # aftonposten-6b-align-scan
 
-This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
+This model is a fine-tuned version of [NbAiLab/nb-gpt-j-6B-v2](https://huggingface.co/NbAiLab/nb-gpt-j-6B-v2) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.6990
-- Rewards/chosen: 0.0098
-- Rewards/rejected: 0.0133
-- Rewards/accuracies: 0.4996
-- Rewards/margins: -0.0035
-- Logps/rejected: -37.5019
-- Logps/chosen: -34.0236
-- Logits/rejected: -2.2393
-- Logits/chosen: -2.2442
+- Logits/chosen: -2.2444
+- Logits/rejected: -2.2395
+- Logps/chosen: -34.0056
+- Logps/rejected: -37.4890
+- Loss: 0.4994
+- Rewards/accuracies: 0.4988
+- Rewards/chosen: 0.0261
+- Rewards/margins: 0.0012
+- Rewards/rejected: 0.0249
 
 ## Model description
 
@@ -63,9 +61,9 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Logits/chosen | Logits/rejected | Logps/chosen | Logps/rejected | Validation Loss | Rewards/accuracies | Rewards/chosen | Rewards/margins | Rewards/rejected |
 |:-------------:|:-----:|:----:|:-------------:|:---------------:|:------------:|:--------------:|:---------------:|:------------------:|:--------------:|:---------------:|:----------------:|
-| 0.6951 | 0.26 | 100 | -2.2438 | -2.2389 | -34.0252 | -37.5076 | 0.6966 | 0.4954 | 0.0084 | 0.0003 | 0.0081 |
-| 0.6891 | 0.52 | 200 | -2.2432 | -2.2384 | -34.0243 | -37.5115 | 0.6947 | 0.4934 | 0.0092 | 0.0046 | 0.0046 |
-| 0.693 | 0.78 | 300 | -2.2437 | -2.2388 | -34.0172 | -37.4976 | 0.6982 | 0.4983 | 0.0156 | -0.0015 | 0.0171 |
+| 0.5014 | 0.26 | 100 | -2.2435 | -2.2386 | -34.0005 | -37.4935 | 0.4983 | 0.5307 | 0.0306 | 0.0098 | 0.0208 |
+| 0.4978 | 0.52 | 200 | -2.2443 | -2.2395 | -34.0014 | -37.4845 | 0.4998 | 0.5104 | 0.0298 | 0.0009 | 0.0289 |
+| 0.4964 | 0.78 | 300 | -2.2444 | -2.2395 | -34.0056 | -37.4890 | 0.4994 | 0.4988 | 0.0261 | 0.0012 | 0.0249 |
 
 
 ### Framework versions
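
The card above describes a LoRA (PEFT) adapter trained with DPO-style preference alignment on top of NbAiLab/nb-gpt-j-6B-v2. A minimal sketch of how such an adapter is typically loaded for inference with transformers and peft follows; the adapter repo id is an assumption inferred from the model name and commit author, not something this commit confirms.

```python
# Minimal sketch: attach the DPO-trained LoRA adapter to the base model for inference.
# The adapter repo id below is an assumption inferred from the model name; adjust as needed.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

BASE_ID = "NbAiLab/nb-gpt-j-6B-v2"
ADAPTER_ID = "hugodk-sch/aftonposten-6b-align-scan"  # assumed location of this adapter

tokenizer = AutoTokenizer.from_pretrained(BASE_ID)
base_model = AutoModelForCausalLM.from_pretrained(BASE_ID, torch_dtype="auto", device_map="auto")
model = PeftModel.from_pretrained(base_model, ADAPTER_ID)  # loads adapter_model.safetensors

prompt = "Skriv en tittel: "
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```
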
adapter_config.json CHANGED
@@ -19,13 +19,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
+    "gate_proj",
+    "o_proj",
     "up_proj",
-    "v_proj",
     "k_proj",
-    "q_proj",
-    "gate_proj",
     "down_proj",
-    "o_proj"
+    "v_proj",
+    "q_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_rslora": false
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b30a3614e90564741bd0b06f4b5d211ac9ab0d5d4651e82dec2b571e49a407d7
+oid sha256:07f19c50aa7f1e083760dcc58e66774fce21075bcdfe63ff7236835de16cb27e
 size 176183216
all_results.json CHANGED
@@ -1,21 +1,21 @@
 {
     "epoch": 1.0,
-    "eval_logits/chosen": -2.244168758392334,
-    "eval_logits/rejected": -2.239292860031128,
-    "eval_logps/chosen": -34.02364730834961,
-    "eval_logps/rejected": -37.501853942871094,
-    "eval_loss": 0.6989675760269165,
-    "eval_rewards/accuracies": 0.49958473443984985,
-    "eval_rewards/chosen": 0.009816422127187252,
-    "eval_rewards/margins": -0.0034731123596429825,
-    "eval_rewards/rejected": 0.01328953169286251,
-    "eval_runtime": 146.1437,
+    "eval_logits/chosen": -2.2438042163848877,
+    "eval_logits/rejected": -2.23892879486084,
+    "eval_logps/chosen": -33.9998779296875,
+    "eval_logps/rejected": -37.478267669677734,
+    "eval_loss": 0.5009505748748779,
+    "eval_rewards/accuracies": 0.5074750781059265,
+    "eval_rewards/chosen": 0.031207971274852753,
+    "eval_rewards/margins": -0.003311639651656151,
+    "eval_rewards/rejected": 0.03451961278915405,
+    "eval_runtime": 145.8121,
     "eval_samples": 343,
-    "eval_samples_per_second": 2.347,
-    "eval_steps_per_second": 0.294,
-    "train_loss": 0.15265155210123435,
-    "train_runtime": 627.689,
+    "eval_samples_per_second": 2.352,
+    "eval_steps_per_second": 0.295,
+    "train_loss": 0.10993070664343896,
+    "train_runtime": 627.3649,
     "train_samples": 3079,
-    "train_samples_per_second": 4.905,
-    "train_steps_per_second": 0.613
+    "train_samples_per_second": 4.908,
+    "train_steps_per_second": 0.614
 }
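
The `rewards/*` and `logps/*` fields above follow the usual TRL DPO convention: each implicit reward is the beta-scaled log-probability difference between the policy and the frozen reference model, the margin is chosen minus rejected, and the accuracy is the fraction of pairs with a positive margin. A small numerical sketch, assuming the standard sigmoid DPO objective and a placeholder beta (the actual beta is not part of this commit):

```python
# Sketch of how the rewards/* fields relate to the logged log-probabilities under the
# standard sigmoid DPO objective. beta is a placeholder; its value is not in this commit.
import math

def sigmoid(x: float) -> float:
    return 1.0 / (1.0 + math.exp(-x))

def dpo_pair_stats(pi_logp_chosen, ref_logp_chosen, pi_logp_rejected, ref_logp_rejected, beta=0.1):
    # Implicit rewards: beta-scaled log-prob ratio between the policy and the frozen reference.
    reward_chosen = beta * (pi_logp_chosen - ref_logp_chosen)
    reward_rejected = beta * (pi_logp_rejected - ref_logp_rejected)
    margin = reward_chosen - reward_rejected           # rewards/margins
    loss = -math.log(sigmoid(margin))                  # sigmoid DPO loss for this pair
    correct = margin > 0                               # rewards/accuracies is the mean of this
    return reward_chosen, reward_rejected, margin, loss, correct

# With identical policy and reference log-probs the margin is 0 and the loss is ln 2 ~ 0.693.
print(dpo_pair_stats(-34.0, -34.0, -37.5, -37.5))
```

The previous run's first logged training loss (0.6931) matches that ln 2 baseline, while the new run starts at 0.5000; this suggests the "align-scan" switched to a different DPO loss variant, but that is an inference from the numbers, not something stated in the commit.
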
eval_results.json CHANGED
@@ -1,16 +1,16 @@
 {
     "epoch": 1.0,
-    "eval_logits/chosen": -2.244168758392334,
-    "eval_logits/rejected": -2.239292860031128,
-    "eval_logps/chosen": -34.02364730834961,
-    "eval_logps/rejected": -37.501853942871094,
-    "eval_loss": 0.6989675760269165,
-    "eval_rewards/accuracies": 0.49958473443984985,
-    "eval_rewards/chosen": 0.009816422127187252,
-    "eval_rewards/margins": -0.0034731123596429825,
-    "eval_rewards/rejected": 0.01328953169286251,
-    "eval_runtime": 146.1437,
+    "eval_logits/chosen": -2.2438042163848877,
+    "eval_logits/rejected": -2.23892879486084,
+    "eval_logps/chosen": -33.9998779296875,
+    "eval_logps/rejected": -37.478267669677734,
+    "eval_loss": 0.5009505748748779,
+    "eval_rewards/accuracies": 0.5074750781059265,
+    "eval_rewards/chosen": 0.031207971274852753,
+    "eval_rewards/margins": -0.003311639651656151,
+    "eval_rewards/rejected": 0.03451961278915405,
+    "eval_runtime": 145.8121,
     "eval_samples": 343,
-    "eval_samples_per_second": 2.347,
-    "eval_steps_per_second": 0.294
+    "eval_samples_per_second": 2.352,
+    "eval_steps_per_second": 0.295
 }
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 1.0,
-    "train_loss": 0.15265155210123435,
-    "train_runtime": 627.689,
+    "train_loss": 0.10993070664343896,
+    "train_runtime": 627.3649,
     "train_samples": 3079,
-    "train_samples_per_second": 4.905,
-    "train_steps_per_second": 0.613
+    "train_samples_per_second": 4.908,
+    "train_steps_per_second": 0.614
 }
trainer_state.json CHANGED
@@ -15,7 +15,7 @@
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
- "loss": 0.6931,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
@@ -25,591 +25,591 @@
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.2820512820512818e-07,
28
- "logits/chosen": -1.866059422492981,
29
- "logits/rejected": -1.870389699935913,
30
- "logps/chosen": -36.98250198364258,
31
- "logps/rejected": -33.64270782470703,
32
- "loss": 0.6804,
33
- "rewards/accuracies": 0.4027777910232544,
34
- "rewards/chosen": 0.021689780056476593,
35
- "rewards/margins": 0.033059459179639816,
36
- "rewards/rejected": -0.011369682848453522,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.5641025641025636e-07,
42
- "logits/chosen": -1.9973828792572021,
43
- "logits/rejected": -2.0000317096710205,
44
- "logps/chosen": -29.631702423095703,
45
- "logps/rejected": -29.05877113342285,
46
- "loss": 0.6937,
47
- "rewards/accuracies": 0.512499988079071,
48
- "rewards/chosen": 0.009445475414395332,
49
- "rewards/margins": 0.005240145605057478,
50
- "rewards/rejected": 0.004205327946692705,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.8461538461538463e-07,
56
- "logits/chosen": -1.9198739528656006,
57
- "logits/rejected": -1.9171788692474365,
58
- "logps/chosen": -31.4038028717041,
59
- "logps/rejected": -33.20899963378906,
60
- "loss": 0.696,
61
- "rewards/accuracies": 0.5,
62
- "rewards/chosen": 0.01106296293437481,
63
- "rewards/margins": 0.0025471807457506657,
64
- "rewards/rejected": 0.008515783585608006,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438433e-07,
70
- "logits/chosen": -2.0166726112365723,
71
- "logits/rejected": -2.0079195499420166,
72
- "logps/chosen": -32.57840347290039,
73
- "logps/rejected": -32.493370056152344,
74
- "loss": 0.7066,
75
- "rewards/accuracies": 0.48750001192092896,
76
- "rewards/chosen": -0.00143451988697052,
77
- "rewards/margins": -0.018809262663125992,
78
- "rewards/rejected": 0.017374742776155472,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542186e-07,
84
- "logits/chosen": -1.8648544549942017,
85
- "logits/rejected": -1.8540741205215454,
86
- "logps/chosen": -33.54566955566406,
87
- "logps/rejected": -35.43162155151367,
88
- "loss": 0.7004,
89
- "rewards/accuracies": 0.4749999940395355,
90
- "rewards/chosen": 0.01062955055385828,
91
- "rewards/margins": -0.00802132673561573,
92
- "rewards/rejected": 0.018650878220796585,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941118e-07,
98
- "logits/chosen": -1.9450620412826538,
99
- "logits/rejected": -1.947021245956421,
100
- "logps/chosen": -32.5836296081543,
101
- "logps/rejected": -33.20051193237305,
102
- "loss": 0.6761,
103
- "rewards/accuracies": 0.5375000238418579,
104
- "rewards/chosen": 0.014901289716362953,
105
- "rewards/margins": 0.04112350940704346,
106
- "rewards/rejected": -0.026222219690680504,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413548e-07,
112
- "logits/chosen": -2.079589366912842,
113
- "logits/rejected": -2.0845787525177,
114
- "logps/chosen": -33.9754524230957,
115
- "logps/rejected": -36.5793342590332,
116
- "loss": 0.6876,
117
- "rewards/accuracies": 0.512499988079071,
118
- "rewards/chosen": 0.01173111516982317,
119
- "rewards/margins": 0.021146392449736595,
120
- "rewards/rejected": -0.00941527634859085,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-07,
126
- "logits/chosen": -1.9424660205841064,
127
- "logits/rejected": -1.945634126663208,
128
- "logps/chosen": -34.394561767578125,
129
- "logps/rejected": -34.57395553588867,
130
- "loss": 0.6921,
131
- "rewards/accuracies": 0.5625,
132
- "rewards/chosen": 0.029921507462859154,
133
- "rewards/margins": 0.013314949348568916,
134
- "rewards/rejected": 0.016606558114290237,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.736716601303429e-07,
140
- "logits/chosen": -1.950564980506897,
141
- "logits/rejected": -1.955082893371582,
142
- "logps/chosen": -32.44951248168945,
143
- "logps/rejected": -32.353668212890625,
144
- "loss": 0.6909,
145
- "rewards/accuracies": 0.5249999761581421,
146
- "rewards/chosen": 0.0161209125071764,
147
- "rewards/margins": 0.011405264027416706,
148
- "rewards/rejected": 0.004715651273727417,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.62624545834521e-07,
154
- "logits/chosen": -2.0489234924316406,
155
- "logits/rejected": -2.046917200088501,
156
- "logps/chosen": -32.236671447753906,
157
- "logps/rejected": -31.263763427734375,
158
- "loss": 0.6951,
159
- "rewards/accuracies": 0.5,
160
- "rewards/chosen": 0.0037471160758286715,
161
- "rewards/margins": 0.0015437646070495248,
162
- "rewards/rejected": 0.002203352050855756,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
- "eval_logits/chosen": -2.243756055831909,
168
- "eval_logits/rejected": -2.238872766494751,
169
- "eval_logps/chosen": -34.025177001953125,
170
- "eval_logps/rejected": -37.50759506225586,
171
- "eval_loss": 0.696557343006134,
172
- "eval_rewards/accuracies": 0.49543190002441406,
173
- "eval_rewards/chosen": 0.008434689603745937,
174
- "eval_rewards/margins": 0.0003138432221021503,
175
- "eval_rewards/rejected": 0.008120844140648842,
176
- "eval_runtime": 146.3373,
177
- "eval_samples_per_second": 2.344,
178
  "eval_steps_per_second": 0.294,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.4982572012636904e-07,
184
- "logits/chosen": -2.0052237510681152,
185
- "logits/rejected": -2.002803325653076,
186
- "logps/chosen": -33.24525833129883,
187
- "logps/rejected": -34.01074981689453,
188
- "loss": 0.7064,
189
- "rewards/accuracies": 0.48750001192092896,
190
- "rewards/chosen": -0.0013012334238737822,
191
- "rewards/margins": -0.018059223890304565,
192
- "rewards/rejected": 0.016757991164922714,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777677e-07,
198
- "logits/chosen": -2.016326665878296,
199
- "logits/rejected": -2.007951498031616,
200
- "logps/chosen": -32.478302001953125,
201
- "logps/rejected": -32.186073303222656,
202
- "loss": 0.7012,
203
- "rewards/accuracies": 0.48750001192092896,
204
- "rewards/chosen": -0.030056962743401527,
205
- "rewards/margins": -0.007947373203933239,
206
- "rewards/rejected": -0.022109590470790863,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.194082707715275e-07,
212
- "logits/chosen": -2.045950412750244,
213
- "logits/rejected": -2.037904739379883,
214
- "logps/chosen": -30.491313934326172,
215
- "logps/rejected": -32.0540771484375,
216
- "loss": 0.7025,
217
- "rewards/accuracies": 0.4375,
218
- "rewards/chosen": -0.01403898186981678,
219
- "rewards/margins": -0.013075167313218117,
220
- "rewards/rejected": -0.0009638145565986633,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.020402418666621e-07,
226
- "logits/chosen": -1.9765291213989258,
227
- "logits/rejected": -1.986802101135254,
228
- "logps/chosen": -31.39646339416504,
229
- "logps/rejected": -32.522178649902344,
230
- "loss": 0.6923,
231
- "rewards/accuracies": 0.512499988079071,
232
- "rewards/chosen": 0.02087983302772045,
233
- "rewards/margins": 0.00944516807794571,
234
- "rewards/rejected": 0.011434664018452168,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.8341962650351185e-07,
240
- "logits/chosen": -1.8905407190322876,
241
- "logits/rejected": -1.8916336297988892,
242
- "logps/chosen": -34.17607116699219,
243
- "logps/rejected": -34.76817321777344,
244
- "loss": 0.6851,
245
- "rewards/accuracies": 0.4749999940395355,
246
- "rewards/chosen": 0.01495879702270031,
247
- "rewards/margins": 0.024784717708826065,
248
- "rewards/rejected": -0.00982591975480318,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800572e-07,
254
- "logits/chosen": -1.942565679550171,
255
- "logits/rejected": -1.9390919208526611,
256
- "logps/chosen": -36.1614875793457,
257
- "logps/rejected": -32.72848129272461,
258
- "loss": 0.6907,
259
- "rewards/accuracies": 0.4625000059604645,
260
- "rewards/chosen": 0.00917207170277834,
261
- "rewards/margins": 0.01433003693819046,
262
- "rewards/rejected": -0.0051579661667346954,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.430433172111807e-07,
268
- "logits/chosen": -2.0418710708618164,
269
- "logits/rejected": -2.034475564956665,
270
- "logps/chosen": -33.76544952392578,
271
- "logps/rejected": -31.358556747436523,
272
- "loss": 0.6791,
273
- "rewards/accuracies": 0.574999988079071,
274
- "rewards/chosen": 0.02917439118027687,
275
- "rewards/margins": 0.03521214798092842,
276
- "rewards/rejected": -0.0060377540066838264,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.216202642830543e-07,
282
- "logits/chosen": -2.0476231575012207,
283
- "logits/rejected": -2.0529019832611084,
284
- "logps/chosen": -32.533023834228516,
285
- "logps/rejected": -32.506805419921875,
286
- "loss": 0.6814,
287
- "rewards/accuracies": 0.5625,
288
- "rewards/chosen": 0.010587882250547409,
289
- "rewards/margins": 0.03008626028895378,
290
- "rewards/rejected": -0.019498378038406372,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.9960716642946403e-07,
296
- "logits/chosen": -2.048576593399048,
297
- "logits/rejected": -2.0457961559295654,
298
- "logps/chosen": -31.470510482788086,
299
- "logps/rejected": -31.322284698486328,
300
- "loss": 0.687,
301
- "rewards/accuracies": 0.5625,
302
- "rewards/chosen": 0.016599375754594803,
303
- "rewards/margins": 0.020261235535144806,
304
- "rewards/rejected": -0.0036618602462112904,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.771853789806683e-07,
310
- "logits/chosen": -1.9184268712997437,
311
- "logits/rejected": -1.9230947494506836,
312
- "logps/chosen": -31.57404136657715,
313
- "logps/rejected": -32.775550842285156,
314
- "loss": 0.6891,
315
- "rewards/accuracies": 0.48750001192092896,
316
- "rewards/chosen": 0.02901501953601837,
317
- "rewards/margins": 0.015173261985182762,
318
- "rewards/rejected": 0.013841753825545311,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
- "eval_logits/chosen": -2.243248462677002,
324
- "eval_logits/rejected": -2.238374948501587,
325
- "eval_logps/chosen": -34.02431869506836,
326
- "eval_logps/rejected": -37.511497497558594,
327
- "eval_loss": 0.6946919560432434,
328
- "eval_rewards/accuracies": 0.49335551261901855,
329
- "eval_rewards/chosen": 0.009210066869854927,
330
- "eval_rewards/margins": 0.0045979218557477,
331
- "eval_rewards/rejected": 0.004612144082784653,
332
- "eval_runtime": 146.0514,
333
- "eval_samples_per_second": 2.348,
334
- "eval_steps_per_second": 0.294,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402e-07,
340
- "logits/chosen": -2.0314748287200928,
341
- "logits/rejected": -2.0421440601348877,
342
- "logps/chosen": -31.954639434814453,
343
- "logps/rejected": -33.87318801879883,
344
- "loss": 0.6864,
345
- "rewards/accuracies": 0.550000011920929,
346
- "rewards/chosen": 0.014999927952885628,
347
- "rewards/margins": 0.024442464113235474,
348
- "rewards/rejected": -0.009442536160349846,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.318564697655179e-07,
354
- "logits/chosen": -1.9249544143676758,
355
- "logits/rejected": -1.9398053884506226,
356
- "logps/chosen": -30.072036743164062,
357
- "logps/rejected": -31.55165672302246,
358
- "loss": 0.6827,
359
- "rewards/accuracies": 0.6000000238418579,
360
- "rewards/chosen": 0.035550959408283234,
361
- "rewards/margins": 0.029681822285056114,
362
- "rewards/rejected": 0.005869132932275534,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.093227910899832e-07,
368
- "logits/chosen": -1.9830297231674194,
369
- "logits/rejected": -1.9869968891143799,
370
- "logps/chosen": -33.3900146484375,
371
- "logps/rejected": -31.545156478881836,
372
- "loss": 0.6872,
373
- "rewards/accuracies": 0.5375000238418579,
374
- "rewards/chosen": 0.024708259850740433,
375
- "rewards/margins": 0.02097567543387413,
376
- "rewards/rejected": 0.00373258744366467,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279356e-07,
382
- "logits/chosen": -1.9825174808502197,
383
- "logits/rejected": -1.9605509042739868,
384
- "logps/chosen": -34.16549301147461,
385
- "logps/rejected": -34.945499420166016,
386
- "loss": 0.7065,
387
- "rewards/accuracies": 0.5,
388
- "rewards/chosen": -0.010572168976068497,
389
- "rewards/margins": -0.015337007120251656,
390
- "rewards/rejected": 0.004764837212860584,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.654436768970182e-07,
396
- "logits/chosen": -2.0242011547088623,
397
- "logits/rejected": -2.02089262008667,
398
- "logps/chosen": -32.91257858276367,
399
- "logps/rejected": -36.22357177734375,
400
- "loss": 0.6913,
401
- "rewards/accuracies": 0.5,
402
- "rewards/chosen": 0.01124012004584074,
403
- "rewards/margins": 0.010020162910223007,
404
- "rewards/rejected": 0.0012199539924040437,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.444597403062196e-07,
410
- "logits/chosen": -1.8910300731658936,
411
- "logits/rejected": -1.888593316078186,
412
- "logps/chosen": -34.191619873046875,
413
- "logps/rejected": -35.50851058959961,
414
- "loss": 0.6975,
415
- "rewards/accuracies": 0.6000000238418579,
416
- "rewards/chosen": -0.0028982784133404493,
417
- "rewards/margins": 0.0006028197822161019,
418
- "rewards/rejected": -0.003501094877719879,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.2434529917578887e-07,
424
- "logits/chosen": -1.8757693767547607,
425
- "logits/rejected": -1.8732143640518188,
426
- "logps/chosen": -34.38648223876953,
427
- "logps/rejected": -31.753692626953125,
428
- "loss": 0.6995,
429
- "rewards/accuracies": 0.512499988079071,
430
- "rewards/chosen": 0.010007266886532307,
431
- "rewards/margins": -0.005866709630936384,
432
- "rewards/rejected": 0.015873977914452553,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603521e-07,
438
- "logits/chosen": -1.9795001745224,
439
- "logits/rejected": -1.9688692092895508,
440
- "logps/chosen": -35.32746887207031,
441
- "logps/rejected": -31.84292221069336,
442
- "loss": 0.6843,
443
- "rewards/accuracies": 0.550000011920929,
444
- "rewards/chosen": 0.026254409924149513,
445
- "rewards/margins": 0.027990642935037613,
446
- "rewards/rejected": -0.001736226724460721,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071453e-08,
452
- "logits/chosen": -2.0753533840179443,
453
- "logits/rejected": -2.0603318214416504,
454
- "logps/chosen": -30.902263641357422,
455
- "logps/rejected": -32.65242385864258,
456
- "loss": 0.6872,
457
  "rewards/accuracies": 0.512499988079071,
458
- "rewards/chosen": 0.019674303010106087,
459
- "rewards/margins": 0.020201902836561203,
460
- "rewards/rejected": -0.0005276011070236564,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-08,
466
- "logits/chosen": -1.946671724319458,
467
- "logits/rejected": -1.9441486597061157,
468
- "logps/chosen": -32.906158447265625,
469
- "logps/rejected": -30.805999755859375,
470
- "loss": 0.693,
471
- "rewards/accuracies": 0.4749999940395355,
472
- "rewards/chosen": 0.018858108669519424,
473
- "rewards/margins": 0.009128611534833908,
474
- "rewards/rejected": 0.009729497134685516,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
- "eval_logits/chosen": -2.243694305419922,
480
- "eval_logits/rejected": -2.2388274669647217,
481
- "eval_logps/chosen": -34.017173767089844,
482
- "eval_logps/rejected": -37.497615814208984,
483
- "eval_loss": 0.6981561183929443,
484
- "eval_rewards/accuracies": 0.49833887815475464,
485
- "eval_rewards/chosen": 0.015641551464796066,
486
- "eval_rewards/margins": -0.0014640000881627202,
487
- "eval_rewards/rejected": 0.017105549573898315,
488
- "eval_runtime": 146.0253,
489
- "eval_samples_per_second": 2.349,
490
- "eval_steps_per_second": 0.294,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589034e-08,
496
- "logits/chosen": -1.9287757873535156,
497
- "logits/rejected": -1.9255115985870361,
498
- "logps/chosen": -31.573471069335938,
499
- "logps/rejected": -33.71125030517578,
500
- "loss": 0.698,
501
- "rewards/accuracies": 0.5375000238418579,
502
- "rewards/chosen": 0.026891669258475304,
503
- "rewards/margins": -0.000918733305297792,
504
- "rewards/rejected": 0.02781040407717228,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380912e-08,
510
- "logits/chosen": -1.9807313680648804,
511
- "logits/rejected": -1.968425989151001,
512
- "logps/chosen": -34.54930877685547,
513
- "logps/rejected": -33.57306671142578,
514
- "loss": 0.6615,
515
- "rewards/accuracies": 0.637499988079071,
516
- "rewards/chosen": 0.04208679869771004,
517
- "rewards/margins": 0.07289497554302216,
518
- "rewards/rejected": -0.030808180570602417,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-08,
524
- "logits/chosen": -2.0161099433898926,
525
- "logits/rejected": -2.0146496295928955,
526
- "logps/chosen": -33.46127700805664,
527
- "logps/rejected": -32.4744873046875,
528
- "loss": 0.6883,
529
- "rewards/accuracies": 0.5375000238418579,
530
- "rewards/chosen": 0.022065162658691406,
531
- "rewards/margins": 0.01747960038483143,
532
- "rewards/rejected": 0.004585559479892254,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.05793773749158e-08,
538
- "logits/chosen": -2.102802276611328,
539
- "logits/rejected": -2.0869994163513184,
540
- "logps/chosen": -34.16718292236328,
541
- "logps/rejected": -33.08827590942383,
542
- "loss": 0.706,
543
- "rewards/accuracies": 0.48750001192092896,
544
- "rewards/chosen": 0.010987209156155586,
545
- "rewards/margins": -0.01579815149307251,
546
- "rewards/rejected": 0.026785362511873245,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.251801807404168e-08,
552
- "logits/chosen": -1.97471022605896,
553
- "logits/rejected": -1.9737637042999268,
554
- "logps/chosen": -33.25645065307617,
555
- "logps/rejected": -32.46539306640625,
556
- "loss": 0.6861,
557
- "rewards/accuracies": 0.512499988079071,
558
- "rewards/chosen": 0.023080622777342796,
559
- "rewards/margins": 0.023302335292100906,
560
- "rewards/rejected": -0.00022171511955093592,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-09,
566
- "logits/chosen": -1.9304895401000977,
567
- "logits/rejected": -1.9408460855484009,
568
- "logps/chosen": -32.22315979003906,
569
- "logps/rejected": -35.28199005126953,
570
- "loss": 0.7085,
571
- "rewards/accuracies": 0.42500001192092896,
572
- "rewards/chosen": -0.01662321388721466,
573
- "rewards/margins": -0.022716889157891273,
574
- "rewards/rejected": 0.006093672942370176,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050324e-09,
580
- "logits/chosen": -2.0692858695983887,
581
- "logits/rejected": -2.0627408027648926,
582
- "logps/chosen": -33.64545440673828,
583
- "logps/rejected": -29.203441619873047,
584
- "loss": 0.704,
585
- "rewards/accuracies": 0.42500001192092896,
586
- "rewards/chosen": 0.00586346909403801,
587
- "rewards/margins": -0.014821496792137623,
588
- "rewards/rejected": 0.020684964954853058,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-10,
594
- "logits/chosen": -1.928911805152893,
595
- "logits/rejected": -1.9310725927352905,
596
- "logps/chosen": -34.24489212036133,
597
- "logps/rejected": -30.92240333557129,
598
- "loss": 0.6763,
599
- "rewards/accuracies": 0.5249999761581421,
600
- "rewards/chosen": -0.005258283112198114,
601
- "rewards/margins": 0.03954260051250458,
602
- "rewards/rejected": -0.044800881296396255,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
- "train_loss": 0.15265155210123435,
610
- "train_runtime": 627.689,
611
- "train_samples_per_second": 4.905,
612
- "train_steps_per_second": 0.613
613
  }
614
  ],
615
  "logging_steps": 10,
 
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
+ "loss": 0.5,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
 
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.2820512820512818e-07,
28
+ "logits/chosen": -1.866180658340454,
29
+ "logits/rejected": -1.8704978227615356,
30
+ "logps/chosen": -36.953983306884766,
31
+ "logps/rejected": -33.65004348754883,
32
+ "loss": 0.4868,
33
+ "rewards/accuracies": 0.5694444179534912,
34
+ "rewards/chosen": 0.0473538413643837,
35
+ "rewards/margins": 0.06532809138298035,
36
+ "rewards/rejected": -0.01797425001859665,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.5641025641025636e-07,
42
+ "logits/chosen": -1.9972845315933228,
43
+ "logits/rejected": -1.999929428100586,
44
+ "logps/chosen": -29.655675888061523,
45
+ "logps/rejected": -29.05768394470215,
46
+ "loss": 0.5019,
47
+ "rewards/accuracies": 0.38749998807907104,
48
+ "rewards/chosen": -0.012135368771851063,
49
+ "rewards/margins": -0.01731942594051361,
50
+ "rewards/rejected": 0.005184055306017399,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.8461538461538463e-07,
56
+ "logits/chosen": -1.9195616245269775,
57
+ "logits/rejected": -1.9168630838394165,
58
+ "logps/chosen": -31.41298484802246,
59
+ "logps/rejected": -33.21580123901367,
60
+ "loss": 0.5,
61
+ "rewards/accuracies": 0.5249999761581421,
62
+ "rewards/chosen": 0.0027983374893665314,
63
+ "rewards/margins": 0.00040734148933552206,
64
+ "rewards/rejected": 0.0023909960873425007,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438433e-07,
70
+ "logits/chosen": -2.0171382427215576,
71
+ "logits/rejected": -2.0083811283111572,
72
+ "logps/chosen": -32.577945709228516,
73
+ "logps/rejected": -32.48347473144531,
74
+ "loss": 0.5069,
75
+ "rewards/accuracies": 0.42500001192092896,
76
+ "rewards/chosen": -0.001024238532409072,
77
+ "rewards/margins": -0.027298470959067345,
78
+ "rewards/rejected": 0.02627423405647278,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542186e-07,
84
+ "logits/chosen": -1.8646976947784424,
85
+ "logits/rejected": -1.8539081811904907,
86
+ "logps/chosen": -33.5512809753418,
87
+ "logps/rejected": -35.44205856323242,
88
+ "loss": 0.4992,
89
+ "rewards/accuracies": 0.44999998807907104,
90
+ "rewards/chosen": 0.005579059012234211,
91
+ "rewards/margins": -0.003679370041936636,
92
+ "rewards/rejected": 0.009258432313799858,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941118e-07,
98
+ "logits/chosen": -1.9457124471664429,
99
+ "logits/rejected": -1.9476444721221924,
100
+ "logps/chosen": -32.58470153808594,
101
+ "logps/rejected": -33.17728042602539,
102
+ "loss": 0.4959,
103
+ "rewards/accuracies": 0.48750001192092896,
104
+ "rewards/chosen": 0.013933077454566956,
105
+ "rewards/margins": 0.019245292991399765,
106
+ "rewards/rejected": -0.005312214605510235,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413548e-07,
112
+ "logits/chosen": -2.079798698425293,
113
+ "logits/rejected": -2.084780216217041,
114
+ "logps/chosen": -33.97211456298828,
115
+ "logps/rejected": -36.565345764160156,
116
+ "loss": 0.4982,
117
+ "rewards/accuracies": 0.5,
118
+ "rewards/chosen": 0.014736099168658257,
119
+ "rewards/margins": 0.011561033315956593,
120
+ "rewards/rejected": 0.003175063757225871,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-07,
126
+ "logits/chosen": -1.9421533346176147,
127
+ "logits/rejected": -1.9453033208847046,
128
+ "logps/chosen": -34.40825653076172,
129
+ "logps/rejected": -34.583003997802734,
130
+ "loss": 0.4975,
131
+ "rewards/accuracies": 0.5,
132
+ "rewards/chosen": 0.017596226185560226,
133
+ "rewards/margins": 0.00913666095584631,
134
+ "rewards/rejected": 0.008459563367068768,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.736716601303429e-07,
140
+ "logits/chosen": -1.9502623081207275,
141
+ "logits/rejected": -1.954768180847168,
142
+ "logps/chosen": -32.47718048095703,
143
+ "logps/rejected": -32.35026550292969,
144
+ "loss": 0.5036,
145
+ "rewards/accuracies": 0.550000011920929,
146
+ "rewards/chosen": -0.00878011155873537,
147
+ "rewards/margins": -0.016559559851884842,
148
+ "rewards/rejected": 0.007779450621455908,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.62624545834521e-07,
154
+ "logits/chosen": -2.0489068031311035,
155
+ "logits/rejected": -2.046910285949707,
156
+ "logps/chosen": -32.23564529418945,
157
+ "logps/rejected": -31.257396697998047,
158
+ "loss": 0.5014,
159
+ "rewards/accuracies": 0.512499988079071,
160
+ "rewards/chosen": 0.004673120565712452,
161
+ "rewards/margins": -0.0032607216853648424,
162
+ "rewards/rejected": 0.00793384201824665,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
+ "eval_logits/chosen": -2.2435286045074463,
168
+ "eval_logits/rejected": -2.2386460304260254,
169
+ "eval_logps/chosen": -34.00050735473633,
170
+ "eval_logps/rejected": -37.493465423583984,
171
+ "eval_loss": 0.49828124046325684,
172
+ "eval_rewards/accuracies": 0.5307309031486511,
173
+ "eval_rewards/chosen": 0.03064088523387909,
174
+ "eval_rewards/margins": 0.009802807122468948,
175
+ "eval_rewards/rejected": 0.02083807997405529,
176
+ "eval_runtime": 146.1328,
177
+ "eval_samples_per_second": 2.347,
178
  "eval_steps_per_second": 0.294,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.4982572012636904e-07,
184
+ "logits/chosen": -2.0051121711730957,
185
+ "logits/rejected": -2.0026957988739014,
186
+ "logps/chosen": -33.226436614990234,
187
+ "logps/rejected": -34.04267120361328,
188
+ "loss": 0.4953,
189
+ "rewards/accuracies": 0.5375000238418579,
190
+ "rewards/chosen": 0.015639375895261765,
191
+ "rewards/margins": 0.027611929923295975,
192
+ "rewards/rejected": -0.011972553096711636,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777677e-07,
198
+ "logits/chosen": -2.016453981399536,
199
+ "logits/rejected": -2.0080838203430176,
200
+ "logps/chosen": -32.4493522644043,
201
+ "logps/rejected": -32.185325622558594,
202
+ "loss": 0.4964,
203
+ "rewards/accuracies": 0.4625000059604645,
204
+ "rewards/chosen": -0.0040020109154284,
205
+ "rewards/margins": 0.01743561029434204,
206
+ "rewards/rejected": -0.021437620744109154,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.194082707715275e-07,
212
+ "logits/chosen": -2.045841932296753,
213
+ "logits/rejected": -2.037797689437866,
214
+ "logps/chosen": -30.510135650634766,
215
+ "logps/rejected": -32.068138122558594,
216
+ "loss": 0.5028,
217
+ "rewards/accuracies": 0.4625000059604645,
218
+ "rewards/chosen": -0.030974898487329483,
219
+ "rewards/margins": -0.01735590770840645,
220
+ "rewards/rejected": -0.013618985190987587,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.020402418666621e-07,
226
+ "logits/chosen": -1.976995825767517,
227
+ "logits/rejected": -1.9872783422470093,
228
+ "logps/chosen": -31.390186309814453,
229
+ "logps/rejected": -32.526187896728516,
230
+ "loss": 0.4957,
231
+ "rewards/accuracies": 0.550000011920929,
232
+ "rewards/chosen": 0.026526659727096558,
233
+ "rewards/margins": 0.018703728914260864,
234
+ "rewards/rejected": 0.007822930812835693,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.8341962650351185e-07,
240
+ "logits/chosen": -1.890682578086853,
241
+ "logits/rejected": -1.8917691707611084,
242
+ "logps/chosen": -34.19375991821289,
243
+ "logps/rejected": -34.75046920776367,
244
+ "loss": 0.5014,
245
+ "rewards/accuracies": 0.4124999940395355,
246
+ "rewards/chosen": -0.0009607344982214272,
247
+ "rewards/margins": -0.0070659080520272255,
248
+ "rewards/rejected": 0.0061051733791828156,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800572e-07,
254
+ "logits/chosen": -1.9428867101669312,
255
+ "logits/rejected": -1.939404845237732,
256
+ "logps/chosen": -36.15603256225586,
257
+ "logps/rejected": -32.732059478759766,
258
+ "loss": 0.4954,
259
+ "rewards/accuracies": 0.5625,
260
+ "rewards/chosen": 0.014080708846449852,
261
+ "rewards/margins": 0.022458387538790703,
262
+ "rewards/rejected": -0.008377680554986,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.430433172111807e-07,
268
+ "logits/chosen": -2.0421247482299805,
269
+ "logits/rejected": -2.034731388092041,
270
+ "logps/chosen": -33.75699234008789,
271
+ "logps/rejected": -31.35996437072754,
272
+ "loss": 0.4896,
273
+ "rewards/accuracies": 0.612500011920929,
274
+ "rewards/chosen": 0.03678463399410248,
275
+ "rewards/margins": 0.04408547282218933,
276
+ "rewards/rejected": -0.007300837431102991,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.216202642830543e-07,
282
+ "logits/chosen": -2.0474019050598145,
283
+ "logits/rejected": -2.052670955657959,
284
+ "logps/chosen": -32.514625549316406,
285
+ "logps/rejected": -32.481895446777344,
286
+ "loss": 0.4933,
287
+ "rewards/accuracies": 0.550000011920929,
288
+ "rewards/chosen": 0.0271458737552166,
289
+ "rewards/margins": 0.024227729067206383,
290
+ "rewards/rejected": 0.002918146550655365,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.9960716642946403e-07,
296
+ "logits/chosen": -2.048748016357422,
297
+ "logits/rejected": -2.045966625213623,
298
+ "logps/chosen": -31.47454833984375,
299
+ "logps/rejected": -31.311954498291016,
300
+ "loss": 0.498,
301
+ "rewards/accuracies": 0.5,
302
+ "rewards/chosen": 0.012961958535015583,
303
+ "rewards/margins": 0.007329396903514862,
304
+ "rewards/rejected": 0.005632560700178146,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.771853789806683e-07,
310
+ "logits/chosen": -1.9195592403411865,
311
+ "logits/rejected": -1.9242277145385742,
312
+ "logps/chosen": -31.59905433654785,
313
+ "logps/rejected": -32.79246139526367,
314
+ "loss": 0.4978,
315
+ "rewards/accuracies": 0.550000011920929,
316
+ "rewards/chosen": 0.00650545209646225,
317
+ "rewards/margins": 0.007882391102612019,
318
+ "rewards/rejected": -0.0013769377255812287,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
+ "eval_logits/chosen": -2.244333267211914,
324
+ "eval_logits/rejected": -2.239457845687866,
325
+ "eval_logps/chosen": -34.00139236450195,
326
+ "eval_logps/rejected": -37.48446273803711,
327
+ "eval_loss": 0.4998188018798828,
328
+ "eval_rewards/accuracies": 0.5103820562362671,
329
+ "eval_rewards/chosen": 0.02984398789703846,
330
+ "eval_rewards/margins": 0.000900517450645566,
331
+ "eval_rewards/rejected": 0.028943469747900963,
332
+ "eval_runtime": 145.8897,
333
+ "eval_samples_per_second": 2.351,
334
+ "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402e-07,
340
+ "logits/chosen": -2.0324440002441406,
341
+ "logits/rejected": -2.043117046356201,
342
+ "logps/chosen": -31.93975830078125,
343
+ "logps/rejected": -33.84552001953125,
344
+ "loss": 0.4961,
345
+ "rewards/accuracies": 0.5,
346
+ "rewards/chosen": 0.028390545397996902,
347
+ "rewards/margins": 0.012928962707519531,
348
+ "rewards/rejected": 0.015461576171219349,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.318564697655179e-07,
354
+ "logits/chosen": -1.9257566928863525,
355
+ "logits/rejected": -1.9405906200408936,
356
+ "logps/chosen": -30.077129364013672,
357
+ "logps/rejected": -31.565658569335938,
358
+ "loss": 0.493,
359
+ "rewards/accuracies": 0.5874999761581421,
360
+ "rewards/chosen": 0.030965950340032578,
361
+ "rewards/margins": 0.037700776010751724,
362
+ "rewards/rejected": -0.00673482334241271,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.093227910899832e-07,
368
+ "logits/chosen": -1.9837474822998047,
369
+ "logits/rejected": -1.9877216815948486,
370
+ "logps/chosen": -33.37251281738281,
371
+ "logps/rejected": -31.5429744720459,
372
+ "loss": 0.4927,
373
+ "rewards/accuracies": 0.550000011920929,
374
+ "rewards/chosen": 0.04046114161610603,
375
+ "rewards/margins": 0.034767456352710724,
376
+ "rewards/rejected": 0.00569369038566947,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279356e-07,
382
+ "logits/chosen": -1.9833183288574219,
383
+ "logits/rejected": -1.9613754749298096,
384
+ "logps/chosen": -34.14833068847656,
385
+ "logps/rejected": -34.94057846069336,
386
+ "loss": 0.5009,
387
+ "rewards/accuracies": 0.5249999761581421,
388
+ "rewards/chosen": 0.004874309059232473,
389
+ "rewards/margins": -0.004319626372307539,
390
+ "rewards/rejected": 0.009193938225507736,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.654436768970182e-07,
396
+ "logits/chosen": -2.025144338607788,
397
+ "logits/rejected": -2.0218405723571777,
398
+ "logps/chosen": -32.892818450927734,
399
+ "logps/rejected": -36.1950798034668,
400
+ "loss": 0.5004,
401
+ "rewards/accuracies": 0.48750001192092896,
402
+ "rewards/chosen": 0.029024243354797363,
403
+ "rewards/margins": 0.002163961064070463,
404
+ "rewards/rejected": 0.026860278099775314,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.444597403062196e-07,
410
+ "logits/chosen": -1.8918704986572266,
411
+ "logits/rejected": -1.8894078731536865,
412
+ "logps/chosen": -34.1577262878418,
413
+ "logps/rejected": -35.497093200683594,
414
+ "loss": 0.4956,
415
+ "rewards/accuracies": 0.5625,
416
+ "rewards/chosen": 0.02760697342455387,
417
+ "rewards/margins": 0.02083553373813629,
418
+ "rewards/rejected": 0.006771440617740154,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.2434529917578887e-07,
424
+ "logits/chosen": -1.8762916326522827,
425
+ "logits/rejected": -1.8737586736679077,
426
+ "logps/chosen": -34.374244689941406,
427
+ "logps/rejected": -31.73373031616211,
428
+ "loss": 0.5038,
429
+ "rewards/accuracies": 0.4625000059604645,
430
+ "rewards/chosen": 0.02102069742977619,
431
+ "rewards/margins": -0.012818600051105022,
432
+ "rewards/rejected": 0.03383929282426834,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603521e-07,
438
+ "logits/chosen": -1.9803699254989624,
439
+ "logits/rejected": -1.9697377681732178,
440
+ "logps/chosen": -35.30424880981445,
441
+ "logps/rejected": -31.83871841430664,
442
+ "loss": 0.4898,
443
+ "rewards/accuracies": 0.5375000238418579,
444
+ "rewards/chosen": 0.047153402119874954,
445
+ "rewards/margins": 0.04510683938860893,
446
+ "rewards/rejected": 0.0020465687848627567,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071453e-08,
452
+ "logits/chosen": -2.0763792991638184,
453
+ "logits/rejected": -2.061370372772217,
454
+ "logps/chosen": -30.90180015563965,
455
+ "logps/rejected": -32.65237045288086,
456
+ "loss": 0.4962,
457
  "rewards/accuracies": 0.512499988079071,
458
+ "rewards/chosen": 0.020093059167265892,
459
+ "rewards/margins": 0.020573493093252182,
460
+ "rewards/rejected": -0.00048043689457699656,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-08,
466
+ "logits/chosen": -1.9472248554229736,
467
+ "logits/rejected": -1.9446901082992554,
468
+ "logps/chosen": -32.882179260253906,
469
+ "logps/rejected": -30.789831161499023,
470
+ "loss": 0.4964,
471
+ "rewards/accuracies": 0.5,
472
+ "rewards/chosen": 0.04043656960129738,
473
+ "rewards/margins": 0.016158219426870346,
474
+ "rewards/rejected": 0.024278344586491585,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
+ "eval_logits/chosen": -2.244403123855591,
480
+ "eval_logits/rejected": -2.239530563354492,
481
+ "eval_logps/chosen": -34.00559997558594,
482
+ "eval_logps/rejected": -37.48896408081055,
483
+ "eval_loss": 0.49940842390060425,
484
+ "eval_rewards/accuracies": 0.4987541437149048,
485
+ "eval_rewards/chosen": 0.026060525327920914,
486
+ "eval_rewards/margins": 0.001171064912341535,
487
+ "eval_rewards/rejected": 0.02488945797085762,
488
+ "eval_runtime": 145.9672,
489
+ "eval_samples_per_second": 2.35,
490
+ "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589034e-08,
496
+ "logits/chosen": -1.9294729232788086,
497
+ "logits/rejected": -1.9262111186981201,
498
+ "logps/chosen": -31.556472778320312,
499
+ "logps/rejected": -33.73994827270508,
500
+ "loss": 0.4915,
501
+ "rewards/accuracies": 0.574999988079071,
502
+ "rewards/chosen": 0.04218826815485954,
503
+ "rewards/margins": 0.04020850360393524,
504
+ "rewards/rejected": 0.001979765249416232,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380912e-08,
510
+ "logits/chosen": -1.9809057712554932,
511
+ "logits/rejected": -1.968597412109375,
512
+ "logps/chosen": -34.546607971191406,
513
+ "logps/rejected": -33.53887939453125,
514
+ "loss": 0.4917,
515
+ "rewards/accuracies": 0.5625,
516
+ "rewards/chosen": 0.044516824185848236,
517
+ "rewards/margins": 0.04455769807100296,
518
+ "rewards/rejected": -4.087165143573657e-05,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-08,
524
+ "logits/chosen": -2.0169310569763184,
525
+ "logits/rejected": -2.0154881477355957,
526
+ "logps/chosen": -33.446449279785156,
527
+ "logps/rejected": -32.45838165283203,
528
+ "loss": 0.4965,
529
+ "rewards/accuracies": 0.48750001192092896,
530
+ "rewards/chosen": 0.035413958132267,
531
+ "rewards/margins": 0.016332540661096573,
532
+ "rewards/rejected": 0.019081417471170425,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.05793773749158e-08,
538
+ "logits/chosen": -2.1038289070129395,
539
+ "logits/rejected": -2.0880303382873535,
540
+ "logps/chosen": -34.16547393798828,
541
+ "logps/rejected": -33.04523468017578,
542
+ "loss": 0.5123,
543
+ "rewards/accuracies": 0.36250001192092896,
544
+ "rewards/chosen": 0.012526323087513447,
545
+ "rewards/margins": -0.05299854278564453,
546
+ "rewards/rejected": 0.0655248612165451,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.251801807404168e-08,
552
+ "logits/chosen": -1.9754581451416016,
553
+ "logits/rejected": -1.9744971990585327,
554
+ "logps/chosen": -33.235225677490234,
555
+ "logps/rejected": -32.451236724853516,
556
+ "loss": 0.493,
557
+ "rewards/accuracies": 0.48750001192092896,
558
+ "rewards/chosen": 0.04218399524688721,
559
+ "rewards/margins": 0.029664650559425354,
560
+ "rewards/rejected": 0.0125193502753973,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-09,
566
+ "logits/chosen": -1.9313201904296875,
567
+ "logits/rejected": -1.9416812658309937,
568
+ "logps/chosen": -32.19329071044922,
569
+ "logps/rejected": -35.28053283691406,
570
+ "loss": 0.5008,
571
+ "rewards/accuracies": 0.5249999761581421,
572
+ "rewards/chosen": 0.010262362658977509,
573
+ "rewards/margins": 0.0028595193289220333,
574
+ "rewards/rejected": 0.007402840070426464,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050324e-09,
580
+ "logits/chosen": -2.070078134536743,
581
+ "logits/rejected": -2.063514232635498,
582
+ "logps/chosen": -33.65337371826172,
583
+ "logps/rejected": -29.21976661682129,
584
+ "loss": 0.5017,
585
+ "rewards/accuracies": 0.5,
586
+ "rewards/chosen": -0.0012650005519390106,
587
+ "rewards/margins": -0.007256894372403622,
588
+ "rewards/rejected": 0.0059918928891420364,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-10,
594
+ "logits/chosen": -1.9295686483383179,
595
+ "logits/rejected": -1.9317378997802734,
596
+ "logps/chosen": -34.22509002685547,
597
+ "logps/rejected": -30.884735107421875,
598
+ "loss": 0.4954,
599
+ "rewards/accuracies": 0.5,
600
+ "rewards/chosen": 0.012562277726829052,
601
+ "rewards/margins": 0.02345881052315235,
602
+ "rewards/rejected": -0.01089653093367815,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
+ "train_loss": 0.10993070664343896,
610
+ "train_runtime": 627.3649,
611
+ "train_samples_per_second": 4.908,
612
+ "train_steps_per_second": 0.614
613
  }
614
  ],
615
  "logging_steps": 10,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6b30357d0b1d1e0ef1bb0e18e98e90afb60269f8515a90c0483fcf6e53040bb5
+oid sha256:521df8373b7294b087c904d0266936c5550b5766587020eb20889178b14ee863
 size 4984