amrn committed on
Commit 9a9d392 · verified · 1 Parent(s): 78d4a52

Model save

README.md CHANGED
@@ -27,7 +27,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
- [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/myronorg/huggingface/runs/r98aklh1)
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/myronorg/huggingface/runs/omfw97f8)
 
 
 This model was trained with SFT.
@@ -36,7 +36,7 @@ This model was trained with SFT.
 
 - TRL: 0.15.0.dev0
 - Transformers: 4.49.0.dev0
- - Pytorch: 2.5.1+cu121
+ - Pytorch: 2.5.1
 - Datasets: 3.2.0
 - Tokenizers: 0.21.0
 
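The README's usage snippet ends at `print(output["generated_text"])`, and the card states the model was trained with SFT via TRL. Below is a minimal usage sketch assuming the checkpoint is loaded through the `transformers` text-generation pipeline; the repo id `amrn/sft-model` is a placeholder, not the actual model name from this commit.

```python
# Minimal usage sketch (not part of this commit).
# "amrn/sft-model" is a placeholder repo id; substitute the real Hub id.
from transformers import pipeline

generator = pipeline("text-generation", model="amrn/sft-model")  # placeholder id
output = generator("What is supervised fine-tuning?", max_new_tokens=64)[0]
print(output["generated_text"])
```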
all_results.json CHANGED
@@ -1,9 +1,9 @@
 {
- "epoch": 1.0,
- "total_flos": 76916824473600.0,
- "train_loss": 0.8026671296746067,
- "train_runtime": 1041.4464,
+ "epoch": 0.9948186528497409,
+ "total_flos": 76517996494848.0,
+ "train_loss": 0.8023917121546609,
+ "train_runtime": 1111.3885,
 "train_samples": 16610,
- "train_samples_per_second": 20.75,
- "train_steps_per_second": 0.162
+ "train_samples_per_second": 19.444,
+ "train_steps_per_second": 0.151
 }
 
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:cd46c4f7659b2fe3bbf379e42dd83f61c77b26195e748cd3db4e58c8cc8e89cb
+ oid sha256:e45c9331ae85fc04b1df2a41a9aa2a752fb463e7a58932c68e5cc4e2d9c19407
 size 3087467144
 
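The model.safetensors entry is a Git LFS pointer, so only the sha256 digest changes here (the size stays at 3087467144 bytes). A small sketch, assuming a locally downloaded copy of the weights, that checks the file against the new pointer values:

```python
# Sketch (not part of the commit): verify a local model.safetensors against the
# sha256 and size recorded in the updated LFS pointer above.
import hashlib
import os

EXPECTED_SHA256 = "e45c9331ae85fc04b1df2a41a9aa2a752fb463e7a58932c68e5cc4e2d9c19407"
EXPECTED_SIZE = 3087467144  # bytes, from the pointer file

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

path = "model.safetensors"
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"
assert sha256_of(path) == EXPECTED_SHA256, "checksum mismatch"
```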
train_results.json CHANGED
@@ -1,9 +1,9 @@
 {
- "epoch": 1.0,
- "total_flos": 76916824473600.0,
- "train_loss": 0.8026671296746067,
- "train_runtime": 1041.4464,
+ "epoch": 0.9948186528497409,
+ "total_flos": 76517996494848.0,
+ "train_loss": 0.8023917121546609,
+ "train_runtime": 1111.3885,
 "train_samples": 16610,
- "train_samples_per_second": 20.75,
- "train_steps_per_second": 0.162
+ "train_samples_per_second": 19.444,
+ "train_steps_per_second": 0.151
 }
 
trainer_state.json CHANGED
@@ -1,264 +1,264 @@
 {
 "best_metric": null,
 "best_model_checkpoint": null,
- "epoch": 1.0,
+ "epoch": 0.9948186528497409,
 "eval_steps": 100,
- "global_step": 169,
+ "global_step": 168,
 "is_hyper_param_search": false,
 "is_local_process_zero": true,
 "is_world_process_zero": true,
 "log_history": [
 {
- "epoch": 0.029585798816568046,
- "grad_norm": 2.342892423396605,
+ "epoch": 0.029607698001480384,
+ "grad_norm": 2.3427891450842355,
 "learning_rate": 5.882352941176471e-06,
- "loss": 1.0992,
+ "loss": 1.0993,
 "step": 5
 },
 {
- "epoch": 0.05917159763313609,
- "grad_norm": 1.6368371604135565,
+ "epoch": 0.05921539600296077,
+ "grad_norm": 1.638241993747605,
 "learning_rate": 1.1764705882352942e-05,
 "loss": 1.0402,
 "step": 10
 },
 {
- "epoch": 0.08875739644970414,
- "grad_norm": 0.8989769231311087,
+ "epoch": 0.08882309400444116,
+ "grad_norm": 0.8970481588482149,
 "learning_rate": 1.7647058823529414e-05,
 "loss": 0.9533,
 "step": 15
 },
 {
- "epoch": 0.11834319526627218,
- "grad_norm": 0.6137256595940042,
- "learning_rate": 1.9980782984658682e-05,
+ "epoch": 0.11843079200592153,
+ "grad_norm": 0.6126224631372397,
+ "learning_rate": 1.9980527694749952e-05,
 "loss": 0.8922,
 "step": 20
 },
 {
- "epoch": 0.14792899408284024,
- "grad_norm": 0.5492009965568149,
- "learning_rate": 1.9863613034027224e-05,
+ "epoch": 0.14803849000740193,
+ "grad_norm": 0.5491637741037821,
+ "learning_rate": 1.986180478852149e-05,
 "loss": 0.8551,
 "step": 25
 },
 {
- "epoch": 0.17751479289940827,
- "grad_norm": 0.4349573184458919,
- "learning_rate": 1.9641197940012136e-05,
- "loss": 0.8283,
+ "epoch": 0.17764618800888232,
+ "grad_norm": 0.4357332212949219,
+ "learning_rate": 1.963645895935632e-05,
+ "loss": 0.8282,
 "step": 30
 },
 {
- "epoch": 0.20710059171597633,
- "grad_norm": 0.38659663740930433,
- "learning_rate": 1.9315910880512792e-05,
+ "epoch": 0.20725388601036268,
+ "grad_norm": 0.3870290405518921,
+ "learning_rate": 1.930692657985482e-05,
 "loss": 0.8229,
 "step": 35
 },
 {
- "epoch": 0.23668639053254437,
- "grad_norm": 0.4119151414508529,
- "learning_rate": 1.8891222681391853e-05,
+ "epoch": 0.23686158401184307,
+ "grad_norm": 0.41071978450812635,
+ "learning_rate": 1.887677045685188e-05,
 "loss": 0.8225,
 "step": 40
 },
 {
- "epoch": 0.26627218934911245,
- "grad_norm": 0.35458480206676984,
- "learning_rate": 1.8371664782625287e-05,
+ "epoch": 0.2664692820133235,
+ "grad_norm": 0.35511835037971484,
+ "learning_rate": 1.8350641311400813e-05,
 "loss": 0.8073,
 "step": 45
 },
 {
- "epoch": 0.2958579881656805,
- "grad_norm": 0.34430708054184417,
- "learning_rate": 1.7762780887657576e-05,
+ "epoch": 0.29607698001480387,
+ "grad_norm": 0.34562914008715157,
+ "learning_rate": 1.773422749654988e-05,
 "loss": 0.7977,
 "step": 50
 },
 {
- "epoch": 0.3254437869822485,
- "grad_norm": 0.3838154059326915,
- "learning_rate": 1.7071067811865477e-05,
+ "epoch": 0.32568467801628426,
+ "grad_norm": 0.38429745030506335,
+ "learning_rate": 1.7034193496547903e-05,
 "loss": 0.7877,
 "step": 55
 },
 {
- "epoch": 0.35502958579881655,
- "grad_norm": 0.383532491946636,
- "learning_rate": 1.6303906161279554e-05,
- "loss": 0.7981,
+ "epoch": 0.35529237601776464,
+ "grad_norm": 0.38450100811350485,
+ "learning_rate": 1.6258107872407376e-05,
+ "loss": 0.7982,
 "step": 60
 },
 {
- "epoch": 0.38461538461538464,
- "grad_norm": 0.35083975989542854,
- "learning_rate": 1.5469481581224274e-05,
+ "epoch": 0.38490007401924503,
+ "grad_norm": 0.3508167601846127,
+ "learning_rate": 1.5414361432856475e-05,
 "loss": 0.7722,
 "step": 65
 },
 {
- "epoch": 0.41420118343195267,
- "grad_norm": 0.35395203723670016,
- "learning_rate": 1.4576697415156818e-05,
+ "epoch": 0.41450777202072536,
+ "grad_norm": 0.354273236485806,
+ "learning_rate": 1.4512076515391375e-05,
 "loss": 0.7741,
 "step": 70
 },
 {
- "epoch": 0.4437869822485207,
- "grad_norm": 0.34555629817939354,
- "learning_rate": 1.3635079705638298e-05,
+ "epoch": 0.44411547002220575,
+ "grad_norm": 0.3435732479373877,
+ "learning_rate": 1.356100835825547e-05,
 "loss": 0.7852,
 "step": 75
 },
 {
- "epoch": 0.47337278106508873,
- "grad_norm": 0.35805756615265727,
- "learning_rate": 1.2654675551080724e-05,
+ "epoch": 0.47372316802368614,
+ "grad_norm": 0.3554552572459275,
+ "learning_rate": 1.257143962968246e-05,
 "loss": 0.7667,
 "step": 80
 },
 {
- "epoch": 0.5029585798816568,
- "grad_norm": 0.3781865827387411,
- "learning_rate": 1.164594590280734e-05,
- "loss": 0.7706,
+ "epoch": 0.5033308660251665,
+ "grad_norm": 0.37807154489345807,
+ "learning_rate": 1.155406925472205e-05,
+ "loss": 0.7707,
 "step": 85
 },
 {
- "epoch": 0.5325443786982249,
- "grad_norm": 0.36455865865784565,
- "learning_rate": 1.0619653946285948e-05,
+ "epoch": 0.532938564026647,
+ "grad_norm": 0.3667445569072502,
+ "learning_rate": 1.0519896741619803e-05,
 "loss": 0.7701,
 "step": 90
 },
 {
- "epoch": 0.5621301775147929,
- "grad_norm": 0.37271134883355717,
- "learning_rate": 9.586750257511868e-06,
+ "epoch": 0.5625462620281273,
+ "grad_norm": 0.3813236840767762,
+ "learning_rate": 9.480103258380198e-06,
 "loss": 0.7772,
 "step": 95
 },
 {
- "epoch": 0.591715976331361,
- "grad_norm": 0.3429534883848641,
- "learning_rate": 8.558255959926533e-06,
- "loss": 0.753,
+ "epoch": 0.5921539600296077,
+ "grad_norm": 0.34559010442801097,
+ "learning_rate": 8.445930745277953e-06,
+ "loss": 0.7531,
 "step": 100
 },
 {
- "epoch": 0.591715976331361,
- "eval_loss": 0.7881382703781128,
- "eval_runtime": 1.6822,
- "eval_samples_per_second": 76.093,
- "eval_steps_per_second": 2.378,
+ "epoch": 0.5921539600296077,
+ "eval_loss": 0.7882063388824463,
+ "eval_runtime": 1.6572,
+ "eval_samples_per_second": 77.237,
+ "eval_steps_per_second": 2.414,
 "step": 100
 },
 {
- "epoch": 0.621301775147929,
- "grad_norm": 0.34272683406564375,
- "learning_rate": 7.545145128592009e-06,
- "loss": 0.757,
+ "epoch": 0.6217616580310881,
+ "grad_norm": 0.341364069372392,
+ "learning_rate": 7.428560370317542e-06,
+ "loss": 0.7571,
 "step": 105
 },
 {
- "epoch": 0.650887573964497,
- "grad_norm": 0.35326011905545884,
- "learning_rate": 6.558227696373617e-06,
+ "epoch": 0.6513693560325685,
+ "grad_norm": 0.3559969343392935,
+ "learning_rate": 6.438991641744531e-06,
 "loss": 0.7682,
 "step": 110
 },
 {
- "epoch": 0.6804733727810651,
- "grad_norm": 0.3175593796339319,
- "learning_rate": 5.608034111526298e-06,
- "loss": 0.7623,
+ "epoch": 0.6809770540340488,
+ "grad_norm": 0.3161514819141605,
+ "learning_rate": 5.487923484608629e-06,
+ "loss": 0.7625,
 "step": 115
 },
 {
- "epoch": 0.7100591715976331,
- "grad_norm": 0.30113559390386235,
- "learning_rate": 4.704702977392914e-06,
- "loss": 0.7514,
+ "epoch": 0.7105847520355293,
+ "grad_norm": 0.30339451601959244,
+ "learning_rate": 4.5856385671435285e-06,
+ "loss": 0.7515,
 "step": 120
 },
 {
- "epoch": 0.7396449704142012,
- "grad_norm": 0.3514515497713594,
- "learning_rate": 3.857872873103322e-06,
- "loss": 0.7538,
+ "epoch": 0.7401924500370096,
+ "grad_norm": 0.35261640845743664,
+ "learning_rate": 3.7418921275926245e-06,
+ "loss": 0.7539,
 "step": 125
 },
 {
- "epoch": 0.7692307692307693,
- "grad_norm": 0.32077249449415507,
- "learning_rate": 3.0765795095517026e-06,
- "loss": 0.7555,
+ "epoch": 0.7698001480384901,
+ "grad_norm": 0.3193239804674359,
+ "learning_rate": 2.965806503452098e-06,
+ "loss": 0.7556,
 "step": 130
 },
 {
- "epoch": 0.7988165680473372,
- "grad_norm": 0.32980480359237835,
- "learning_rate": 2.369159318001937e-06,
- "loss": 0.7585,
+ "epoch": 0.7994078460399704,
+ "grad_norm": 0.32912058451560244,
+ "learning_rate": 2.265772503450122e-06,
+ "loss": 0.7586,
 "step": 135
 },
 {
- "epoch": 0.8284023668639053,
- "grad_norm": 0.30552938976565036,
- "learning_rate": 1.743160500034443e-06,
- "loss": 0.7498,
+ "epoch": 0.8290155440414507,
+ "grad_norm": 0.303685802235117,
+ "learning_rate": 1.6493586885991908e-06,
+ "loss": 0.75,
 "step": 140
 },
 {
- "epoch": 0.8579881656804734,
- "grad_norm": 0.3199948980261291,
- "learning_rate": 1.2052624879351105e-06,
- "loss": 0.7566,
+ "epoch": 0.8586232420429312,
+ "grad_norm": 0.3173990670917867,
+ "learning_rate": 1.1232295431481222e-06,
+ "loss": 0.7568,
 "step": 145
 },
 {
- "epoch": 0.8875739644970414,
- "grad_norm": 0.3029762272084133,
- "learning_rate": 7.612046748871327e-07,
- "loss": 0.7666,
+ "epoch": 0.8882309400444115,
+ "grad_norm": 0.3010954114203951,
+ "learning_rate": 6.930734201451817e-07,
+ "loss": 0.7668,
 "step": 150
 },
 {
- "epoch": 0.9171597633136095,
- "grad_norm": 0.30367592763134704,
- "learning_rate": 4.1572517541747294e-07,
- "loss": 0.7613,
+ "epoch": 0.9178386380458919,
+ "grad_norm": 0.30232823482832394,
+ "learning_rate": 3.635410406436857e-07,
+ "loss": 0.7616,
 "step": 155
 },
 {
- "epoch": 0.9467455621301775,
- "grad_norm": 0.29238016566832153,
- "learning_rate": 1.7251026952640583e-07,
- "loss": 0.7607,
+ "epoch": 0.9474463360473723,
+ "grad_norm": 0.28964694770453414,
+ "learning_rate": 1.3819521147851122e-07,
+ "loss": 0.7609,
 "step": 160
 },
 {
- "epoch": 0.9763313609467456,
- "grad_norm": 0.2814543563664885,
- "learning_rate": 3.4155069933301535e-08,
- "loss": 0.7446,
+ "epoch": 0.9770540340488527,
+ "grad_norm": 0.28146626492833826,
+ "learning_rate": 1.947230525005006e-08,
+ "loss": 0.7448,
 "step": 165
 },
 {
- "epoch": 1.0,
- "step": 169,
- "total_flos": 76916824473600.0,
- "train_loss": 0.8026671296746067,
- "train_runtime": 1041.4464,
- "train_samples_per_second": 20.75,
- "train_steps_per_second": 0.162
+ "epoch": 0.9948186528497409,
+ "step": 168,
+ "total_flos": 76517996494848.0,
+ "train_loss": 0.8023917121546609,
+ "train_runtime": 1111.3885,
+ "train_samples_per_second": 19.444,
+ "train_steps_per_second": 0.151
 }
 ],
 "logging_steps": 5,
- "max_steps": 169,
+ "max_steps": 168,
 "num_input_tokens_seen": 0,
 "num_train_epochs": 1,
 "save_steps": 500,
@@ -274,8 +274,8 @@
 "attributes": {}
 }
 },
- "total_flos": 76916824473600.0,
- "train_batch_size": 4,
+ "total_flos": 76517996494848.0,
+ "train_batch_size": 2,
 "trial_name": null,
 "trial_params": null
 }
 
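The bulk of this commit is the re-logged `log_history` in trainer_state.json (per-step epoch, grad_norm, learning_rate, and loss, an eval entry at step 100, and the final summary). A short sketch, assuming a local copy of the updated trainer_state.json, that extracts the training-loss curve from that structure:

```python
# Sketch (not part of the commit): read trainer_state.json and print the
# training-loss curve recorded in "log_history".
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:  # training entries; eval entries carry "eval_loss" instead
        print(f'step {entry["step"]:>3}  epoch {entry["epoch"]:.4f}  loss {entry["loss"]:.4f}')

summary = state["log_history"][-1]
print("train_loss:", summary.get("train_loss"), " runtime (s):", summary.get("train_runtime"))
```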
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:0041f765e83070c411e294af2c5822a097d32d77ef14a249a448aa887d9f0a53
- size 7352
+ oid sha256:26ec61c7b0fd94fbbbbf8ec1c09add723b9f6175b65e350bab79108a82d4defe
+ size 7416
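training_args.bin changes size here (7352 → 7416 bytes); the Trainer writes this file with `torch.save`, so it is typically a pickled `TrainingArguments` object. A sketch, assuming `transformers` is installed locally, for inspecting the arguments that produced this run:

```python
# Sketch (assumption): training_args.bin is the pickled TrainingArguments that
# Trainer saves with torch.save; unpickling it requires transformers importable.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.per_device_train_batch_size, args.gradient_accumulation_steps)
print(args.learning_rate, args.num_train_epochs)
```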