ahmedalhammadi
committed on
Model save
- README.md +1 -1
- all_results.json +5 -5
- train_results.json +5 -5
- trainer_state.json +128 -9
README.md CHANGED
@@ -27,7 +27,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/ahmed-alhammadi-technology-innovation-institute/huggingface/runs/
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/ahmed-alhammadi-technology-innovation-institute/huggingface/runs/8yuu3q5i)
 
 This model was trained with SFT.
 
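A minimal usage sketch, assuming the standard `transformers` text-generation pipeline that the README's quick-start context line (`print(output["generated_text"])`) points to; the repo id below is a placeholder, not taken from this diff:

```python
from transformers import pipeline

# Placeholder id: the commit does not name the repo, substitute the actual checkpoint.
model_id = "ahmedalhammadi/<this-checkpoint>"

# Build a text-generation pipeline around the SFT checkpoint and generate a short sample.
generator = pipeline("text-generation", model=model_id)
output = generator("Hello, my name is", max_new_tokens=32)[0]
print(output["generated_text"])
```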
all_results.json CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 1.0,
-    "total_flos":
-    "train_loss":
-    "train_runtime":
+    "total_flos": 2783887138750464.0,
+    "train_loss": 1.8076151853584381,
+    "train_runtime": 66.5306,
     "train_samples": 12593,
-    "train_samples_per_second":
-    "train_steps_per_second": 2.
+    "train_samples_per_second": 159.686,
+    "train_steps_per_second": 2.495
 }
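The updated metrics are internally consistent with the trainer state committed below; a quick sketch that recomputes them from the values in this commit (the factor-of-4 gap between the per-device batch size and the effective batch is assumed to come from gradient accumulation and/or multiple devices, which the diff does not state):

```python
# Consistency check of the reported throughput, using values from
# all_results.json and trainer_state.json in this commit.
runtime = 66.5306           # "train_runtime" (seconds)
global_step = 166           # "global_step" / "max_steps" in trainer_state.json
samples_per_second = 159.686
steps_per_second = 2.495
per_device_batch = 16       # "train_batch_size" in trainer_state.json

print(round(global_step / runtime, 3))               # 2.495, matches train_steps_per_second
print(round(samples_per_second / steps_per_second))  # 64 samples per optimizer step

# 64 / 16 = 4, presumably gradient accumulation and/or device count (assumption;
# the diff itself does not record either setting).
print(round(samples_per_second / steps_per_second) // per_device_batch)
```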
train_results.json CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 1.0,
-    "total_flos":
-    "train_loss":
-    "train_runtime":
+    "total_flos": 2783887138750464.0,
+    "train_loss": 1.8076151853584381,
+    "train_runtime": 66.5306,
     "train_samples": 12593,
-    "train_samples_per_second":
-    "train_steps_per_second": 2.
+    "train_samples_per_second": 159.686,
+    "train_steps_per_second": 2.495
 }
trainer_state.json CHANGED
@@ -3,7 +3,7 @@
   "best_model_checkpoint": null,
   "epoch": 1.0,
   "eval_steps": 500,
-  "global_step":
+  "global_step": 166,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -127,18 +127,137 @@
       "loss": 3.6707,
       "step": 80
     },
+    {
+      "epoch": 0.5120481927710844,
+      "grad_norm": 1.03125,
+      "learning_rate": 2.3529411764705884e-05,
+      "loss": 3.6468,
+      "step": 85
+    },
+    {
+      "epoch": 0.5421686746987951,
+      "grad_norm": 0.89453125,
+      "learning_rate": 8.23529411764706e-05,
+      "loss": 3.6491,
+      "step": 90
+    },
+    {
+      "epoch": 0.572289156626506,
+      "grad_norm": 1.0703125,
+      "learning_rate": 0.0001411764705882353,
+      "loss": 3.6893,
+      "step": 95
+    },
+    {
+      "epoch": 0.6024096385542169,
+      "grad_norm": 1.0625,
+      "learning_rate": 0.0002,
+      "loss": 3.6404,
+      "step": 100
+    },
+    {
+      "epoch": 0.6325301204819277,
+      "grad_norm": 0.92578125,
+      "learning_rate": 0.00019944481853548335,
+      "loss": 3.6761,
+      "step": 105
+    },
+    {
+      "epoch": 0.6626506024096386,
+      "grad_norm": 0.94140625,
+      "learning_rate": 0.00019778543867110426,
+      "loss": 3.6328,
+      "step": 110
+    },
+    {
+      "epoch": 0.6927710843373494,
+      "grad_norm": 0.91796875,
+      "learning_rate": 0.00019504028554572864,
+      "loss": 3.6478,
+      "step": 115
+    },
+    {
+      "epoch": 0.7228915662650602,
+      "grad_norm": 1.0703125,
+      "learning_rate": 0.00019123984032200586,
+      "loss": 3.6035,
+      "step": 120
+    },
+    {
+      "epoch": 0.7530120481927711,
+      "grad_norm": 0.87890625,
+      "learning_rate": 0.00018642630173483832,
+      "loss": 3.6189,
+      "step": 125
+    },
+    {
+      "epoch": 0.7831325301204819,
+      "grad_norm": 0.88671875,
+      "learning_rate": 0.00018065311753227273,
+      "loss": 3.6364,
+      "step": 130
+    },
+    {
+      "epoch": 0.8132530120481928,
+      "grad_norm": 0.96484375,
+      "learning_rate": 0.00017398439101151905,
+      "loss": 3.6213,
+      "step": 135
+    },
+    {
+      "epoch": 0.8433734939759037,
+      "grad_norm": 0.90625,
+      "learning_rate": 0.0001664941692397025,
+      "loss": 3.558,
+      "step": 140
+    },
+    {
+      "epoch": 0.8734939759036144,
+      "grad_norm": 0.91796875,
+      "learning_rate": 0.00015826562086267956,
+      "loss": 3.5992,
+      "step": 145
+    },
+    {
+      "epoch": 0.9036144578313253,
+      "grad_norm": 0.84765625,
+      "learning_rate": 0.00014939011263122634,
+      "loss": 3.5675,
+      "step": 150
+    },
+    {
+      "epoch": 0.9337349397590361,
+      "grad_norm": 0.78515625,
+      "learning_rate": 0.00013996619489850822,
+      "loss": 3.5625,
+      "step": 155
+    },
+    {
+      "epoch": 0.963855421686747,
+      "grad_norm": 0.84765625,
+      "learning_rate": 0.0001300985073534919,
+      "loss": 3.5525,
+      "step": 160
+    },
+    {
+      "epoch": 0.9939759036144579,
+      "grad_norm": 0.890625,
+      "learning_rate": 0.00011989661714062999,
+      "loss": 3.5954,
+      "step": 165
+    },
     {
       "epoch": 1.0,
-      "step":
-      "total_flos":
-      "train_loss":
-      "train_runtime":
-      "train_samples_per_second":
-      "train_steps_per_second": 2.
+      "step": 166,
+      "total_flos": 2783887138750464.0,
+      "train_loss": 1.8076151853584381,
+      "train_runtime": 66.5306,
+      "train_samples_per_second": 159.686,
+      "train_steps_per_second": 2.495
     }
   ],
   "logging_steps": 5,
-  "max_steps":
+  "max_steps": 166,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 100,
@@ -154,7 +273,7 @@
       "attributes": {}
     }
   },
-  "total_flos":
+  "total_flos": 2783887138750464.0,
  "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null
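The new `log_history` entries record `loss`, `grad_norm`, and `learning_rate` every 5 steps (`logging_steps: 5`). A short sketch, assuming a local copy of `trainer_state.json` and `matplotlib`, that reads the loss curve back out for inspection:

```python
import json

import matplotlib.pyplot as plt

# Load the trainer state saved alongside the checkpoint.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step logging entries; the final summary entry
# uses "train_loss" rather than "loss" and is skipped here.
logs = [e for e in state["log_history"] if "loss" in e and "step" in e]
steps = [e["step"] for e in logs]
losses = [e["loss"] for e in logs]

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.title("SFT training loss (logging_steps=5)")
plt.savefig("loss_curve.png")
```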