{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9158878504672896,
  "eval_steps": 50,
  "global_step": 78,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.18691588785046728,
      "grad_norm": 69.62266204421053,
      "learning_rate": 5e-07,
      "logits/chosen": -2.7059075832366943,
      "logits/rejected": -2.7139859199523926,
      "logps/chosen": -297.3978271484375,
      "logps/rejected": -209.1825408935547,
      "loss": 0.6897,
      "rewards/accuracies": 0.33125001192092896,
      "rewards/chosen": 0.015906766057014465,
      "rewards/margins": 0.004719345830380917,
      "rewards/rejected": 0.011187421157956123,
      "step": 5
    },
    {
      "epoch": 0.37383177570093457,
      "grad_norm": 48.6625786446812,
      "learning_rate": 1e-06,
      "logits/chosen": -2.687206745147705,
      "logits/rejected": -2.655573606491089,
      "logps/chosen": -285.5063781738281,
      "logps/rejected": -242.2504119873047,
      "loss": 0.6171,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.6171835660934448,
      "rewards/margins": 0.23738539218902588,
      "rewards/rejected": 0.3797982335090637,
      "step": 10
    },
    {
      "epoch": 0.5607476635514018,
      "grad_norm": 45.48115817416795,
      "learning_rate": 9.867190271803463e-07,
      "logits/chosen": -2.525240898132324,
      "logits/rejected": -2.515592575073242,
      "logps/chosen": -250.5599365234375,
      "logps/rejected": -217.3943328857422,
      "loss": 0.5754,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 1.57169771194458,
      "rewards/margins": 0.7960993051528931,
      "rewards/rejected": 0.7755983471870422,
      "step": 15
    },
    {
      "epoch": 0.7476635514018691,
      "grad_norm": 45.38701475647773,
      "learning_rate": 9.475816456775312e-07,
      "logits/chosen": -2.435847043991089,
      "logits/rejected": -2.4306652545928955,
      "logps/chosen": -261.1281433105469,
      "logps/rejected": -204.87100219726562,
      "loss": 0.5524,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": 1.8718767166137695,
      "rewards/margins": 1.3355920314788818,
      "rewards/rejected": 0.5362845063209534,
      "step": 20
    },
    {
      "epoch": 0.9345794392523364,
      "grad_norm": 48.2732857719964,
      "learning_rate": 8.846669854914395e-07,
      "logits/chosen": -2.327732563018799,
      "logits/rejected": -2.3180975914001465,
      "logps/chosen": -264.89666748046875,
      "logps/rejected": -211.42849731445312,
      "loss": 0.6395,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": 1.9242029190063477,
      "rewards/margins": 1.439832329750061,
      "rewards/rejected": 0.48437052965164185,
      "step": 25
    },
    {
      "epoch": 1.1214953271028036,
      "grad_norm": 20.029653684022065,
      "learning_rate": 8.013173181896282e-07,
      "logits/chosen": -2.2464213371276855,
      "logits/rejected": -2.243056297302246,
      "logps/chosen": -277.769287109375,
      "logps/rejected": -227.4216766357422,
      "loss": 0.362,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": 2.5170092582702637,
      "rewards/margins": 2.9024429321289062,
      "rewards/rejected": -0.385433554649353,
      "step": 30
    },
    {
      "epoch": 1.308411214953271,
      "grad_norm": 21.973162675661662,
      "learning_rate": 7.019605024359474e-07,
      "logits/chosen": -2.2861316204071045,
      "logits/rejected": -2.2835488319396973,
      "logps/chosen": -255.995849609375,
      "logps/rejected": -221.6183624267578,
      "loss": 0.2162,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 2.4375126361846924,
      "rewards/margins": 3.213893413543701,
      "rewards/rejected": -0.776380717754364,
      "step": 35
    },
    {
      "epoch": 1.4953271028037383,
      "grad_norm": 25.069585328251865,
      "learning_rate": 5.918747589082852e-07,
      "logits/chosen": -2.4357216358184814,
      "logits/rejected": -2.4284071922302246,
      "logps/chosen": -250.3916015625,
      "logps/rejected": -209.541015625,
      "loss": 0.2639,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 2.633223056793213,
      "rewards/margins": 3.557281494140625,
      "rewards/rejected": -0.9240585565567017,
      "step": 40
    },
    {
      "epoch": 1.6822429906542056,
      "grad_norm": 20.270964824453813,
      "learning_rate": 4.769082706771303e-07,
      "logits/chosen": -2.5241520404815674,
      "logits/rejected": -2.5030782222747803,
      "logps/chosen": -253.24951171875,
      "logps/rejected": -230.7584228515625,
      "loss": 0.2784,
      "rewards/accuracies": 0.8812500238418579,
      "rewards/chosen": 2.7160353660583496,
      "rewards/margins": 3.604368209838867,
      "rewards/rejected": -0.8883330225944519,
      "step": 45
    },
    {
      "epoch": 1.8691588785046729,
      "grad_norm": 21.736964227249803,
      "learning_rate": 3.6316850496395855e-07,
      "logits/chosen": -2.553373336791992,
      "logits/rejected": -2.5442967414855957,
      "logps/chosen": -246.09017944335938,
      "logps/rejected": -234.6650390625,
      "loss": 0.2858,
      "rewards/accuracies": 0.8812500238418579,
      "rewards/chosen": 2.596212148666382,
      "rewards/margins": 3.6547951698303223,
      "rewards/rejected": -1.0585827827453613,
      "step": 50
    },
    {
      "epoch": 1.8691588785046729,
      "eval_logits/chosen": -2.54971981048584,
      "eval_logits/rejected": -2.5504722595214844,
      "eval_logps/chosen": -272.8219299316406,
      "eval_logps/rejected": -252.95909118652344,
      "eval_loss": 0.5790095925331116,
      "eval_rewards/accuracies": 0.78125,
      "eval_rewards/chosen": 1.9153512716293335,
      "eval_rewards/margins": 2.3437132835388184,
      "eval_rewards/rejected": -0.42836204171180725,
      "eval_runtime": 50.5185,
      "eval_samples_per_second": 15.044,
      "eval_steps_per_second": 0.238,
      "step": 50
    },
    {
      "epoch": 2.05607476635514,
      "grad_norm": 21.289276215049806,
      "learning_rate": 2.566977607165719e-07,
      "logits/chosen": -2.552999258041382,
      "logits/rejected": -2.537696361541748,
      "logps/chosen": -258.63702392578125,
      "logps/rejected": -246.5629425048828,
      "loss": 0.31,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": 2.7466766834259033,
      "rewards/margins": 3.7296576499938965,
      "rewards/rejected": -0.9829813838005066,
      "step": 55
    },
    {
      "epoch": 2.2429906542056073,
      "grad_norm": 19.52685252408006,
      "learning_rate": 1.631521781767214e-07,
      "logits/chosen": -2.542659044265747,
      "logits/rejected": -2.5249571800231934,
      "logps/chosen": -249.5672607421875,
      "logps/rejected": -231.4530792236328,
      "loss": 0.1789,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": 2.6950507164001465,
      "rewards/margins": 3.7824130058288574,
      "rewards/rejected": -1.0873624086380005,
      "step": 60
    },
    {
      "epoch": 2.4299065420560746,
      "grad_norm": 14.908673380132758,
      "learning_rate": 8.75012627008489e-08,
      "logits/chosen": -2.545358419418335,
      "logits/rejected": -2.527719020843506,
      "logps/chosen": -246.728759765625,
      "logps/rejected": -219.3707275390625,
      "loss": 0.1692,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": 2.9670886993408203,
      "rewards/margins": 4.2727837562561035,
      "rewards/rejected": -1.305694818496704,
      "step": 65
    },
    {
      "epoch": 2.616822429906542,
      "grad_norm": 17.801479834645725,
      "learning_rate": 3.376388529782215e-08,
      "logits/chosen": -2.518758773803711,
      "logits/rejected": -2.5229485034942627,
      "logps/chosen": -247.3220977783203,
      "logps/rejected": -231.07275390625,
      "loss": 0.1563,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 3.1646437644958496,
      "rewards/margins": 4.11710262298584,
      "rewards/rejected": -0.9524585008621216,
      "step": 70
    },
    {
      "epoch": 2.803738317757009,
      "grad_norm": 15.637985661159195,
      "learning_rate": 4.794784562397458e-09,
      "logits/chosen": -2.5335326194763184,
      "logits/rejected": -2.511383533477783,
      "logps/chosen": -236.031005859375,
      "logps/rejected": -252.79702758789062,
      "loss": 0.1681,
      "rewards/accuracies": 0.918749988079071,
      "rewards/chosen": 2.80531907081604,
      "rewards/margins": 3.976816177368164,
      "rewards/rejected": -1.171496868133545,
      "step": 75
    },
    {
      "epoch": 2.9158878504672896,
      "step": 78,
      "total_flos": 919378820333568.0,
      "train_loss": 0.3579892989916679,
      "train_runtime": 2691.602,
      "train_samples_per_second": 7.621,
      "train_steps_per_second": 0.029
    }
  ],
  "logging_steps": 5,
  "max_steps": 78,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 919378820333568.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}