maintain safetensors only & newly trained
- .gitattributes +1 -0
- README.md +9 -13
- all_results.json +10 -10
- eval_results.json +6 -6
- model.safetensors +1 -1
- pytorch_model.bin +0 -3
- train_results.json +5 -5
- trainer_state.json +74 -176
- training_args.bin +1 -1
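This commit drops the `pytorch_model.bin` weights and keeps only `model.safetensors` (alongside a freshly trained 6-epoch run). The diff does not say how the safetensors file was produced; the run may simply have saved safetensors directly via the `save_safetensors` training argument. A minimal conversion sketch, assuming an existing `.bin` checkpoint that fits in CPU memory:

```python
# Sketch only: one common way to turn a pytorch_model.bin checkpoint into
# model.safetensors. This is an assumption about the workflow, not taken from the diff.
import torch
from safetensors.torch import save_file

state_dict = torch.load("pytorch_model.bin", map_location="cpu")
# save_file rejects tensors that share storage, so store contiguous, independent copies.
state_dict = {k: v.contiguous().clone() for k, v in state_dict.items()}
save_file(state_dict, "model.safetensors", metadata={"format": "pt"})
```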
.gitattributes CHANGED
@@ -35,3 +35,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 tokenizer.json filter=lfs diff=lfs merge=lfs -text
 pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
+model.safetensors filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -17,8 +17,8 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [xlm-roberta-large](https://huggingface.co/xlm-roberta-large) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.
-- F1: 0.
+- Loss: 0.2236
+- F1: 0.8688
 
 ## Model description
 
@@ -43,22 +43,18 @@ The following hyperparameters were used during training:
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
-- num_epochs:
+- num_epochs: 6
 
 ### Training results
 
 | Training Loss | Epoch | Step | Validation Loss | F1 |
 |:-------------:|:-----:|:----:|:---------------:|:------:|
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.1171 | 7.0 | 3871 | 0.2522 | 0.8627 |
-| 0.1047 | 8.0 | 4424 | 0.2703 | 0.8665 |
-| 0.0955 | 9.0 | 4977 | 0.2934 | 0.8638 |
-| 0.0856 | 10.0 | 5530 | 0.3162 | 0.8667 |
+| 0.2618 | 1.0 | 553 | 0.2357 | 0.8314 |
+| 0.2025 | 2.0 | 1106 | 0.2209 | 0.8661 |
+| 0.186 | 3.0 | 1659 | 0.2075 | 0.8588 |
+| 0.162 | 4.0 | 2212 | 0.2234 | 0.8609 |
+| 0.1428 | 5.0 | 2765 | 0.2233 | 0.8700 |
+| 0.1328 | 6.0 | 3318 | 0.2236 | 0.8688 |
 
 
 ### Framework versions
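With `pytorch_model.bin` removed from the repo, `from_pretrained` falls back to the safetensors weights automatically. A minimal loading sketch; the repo id and the generic `AutoModel` head are placeholders, since the card does not name the dataset or task:

```python
from transformers import AutoModel, AutoTokenizer

repo_id = "your-namespace/your-finetuned-xlm-roberta-large"  # placeholder repo id

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModel.from_pretrained(repo_id)  # resolves model.safetensors, no .bin file present
```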
all_results.json CHANGED
@@ -1,14 +1,14 @@
 {
-    "epoch":
-    "eval_f1": 0.
-    "eval_loss": 0.
-    "eval_runtime":
+    "epoch": 6.0,
+    "eval_f1": 0.8688311688311688,
+    "eval_loss": 0.22363461554050446,
+    "eval_runtime": 3.1776,
     "eval_samples": 3933,
-    "eval_samples_per_second":
-    "eval_steps_per_second":
-    "train_loss": 0.
-    "train_runtime":
+    "eval_samples_per_second": 1237.713,
+    "eval_steps_per_second": 19.511,
+    "train_loss": 0.19685660219106277,
+    "train_runtime": 1032.5628,
     "train_samples": 35391,
-    "train_samples_per_second":
-    "train_steps_per_second":
+    "train_samples_per_second": 205.649,
+    "train_steps_per_second": 3.213
 }
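The throughput fields in the new `all_results.json` are mutually consistent; a quick sanity check using only the values reported above:

```python
# Values copied from the new all_results.json.
train_samples, num_epochs, train_runtime = 35391, 6.0, 1032.5628
global_steps, eval_samples, eval_runtime = 3318, 3933, 3.1776

print(train_samples * num_epochs / train_runtime)  # ~205.65  (train_samples_per_second)
print(global_steps / train_runtime)                # ~3.213   (train_steps_per_second)
print(eval_samples / eval_runtime)                 # ~1237.7  (eval_samples_per_second)
```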
eval_results.json CHANGED
@@ -1,9 +1,9 @@
 {
-    "epoch":
-    "eval_f1": 0.
-    "eval_loss": 0.
-    "eval_runtime":
+    "epoch": 6.0,
+    "eval_f1": 0.8688311688311688,
+    "eval_loss": 0.22363461554050446,
+    "eval_runtime": 3.1776,
     "eval_samples": 3933,
-    "eval_samples_per_second":
-    "eval_steps_per_second":
+    "eval_samples_per_second": 1237.713,
+    "eval_steps_per_second": 19.511
 }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:7c8f21c0040acb4a0e2ac14fb522d57a81d8c37ca559014d49e2d2e754c99391
 size 2235428256
pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:000d654375f4298dae5763995330cb6c89b1c30559d4fc0676844bfad3ac248f
-size 2235514665
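The deletion of the old LFS pointer and the updated files all land in one Hub commit. A sketch of how such a commit could be made programmatically with the `huggingface_hub` client; the repo id is a placeholder and this is not necessarily how the commit was actually created:

```python
from huggingface_hub import CommitOperationAdd, CommitOperationDelete, HfApi

api = HfApi()
api.create_commit(
    repo_id="your-namespace/your-finetuned-xlm-roberta-large",  # placeholder repo id
    commit_message="maintain safetensors only & newly trained",
    operations=[
        # Upload the new weights and drop the legacy checkpoint in the same commit.
        CommitOperationAdd(path_in_repo="model.safetensors",
                           path_or_fileobj="model.safetensors"),
        CommitOperationDelete(path_in_repo="pytorch_model.bin"),
    ],
)
```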
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-    "epoch":
-    "train_loss": 0.
-    "train_runtime":
+    "epoch": 6.0,
+    "train_loss": 0.19685660219106277,
+    "train_runtime": 1032.5628,
     "train_samples": 35391,
-    "train_samples_per_second":
-    "train_steps_per_second":
+    "train_samples_per_second": 205.649,
+    "train_steps_per_second": 3.213
 }
trainer_state.json CHANGED
@@ -1,277 +1,175 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch":
-  "global_step":
+  "epoch": 6.0,
+  "global_step": 3318,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
       "epoch": 0.36,
-      "learning_rate": 9.
-      "loss": 0.
+      "learning_rate": 9.415310427968657e-06,
+      "loss": 0.5182,
       "step": 200
     },
     {
       "epoch": 0.72,
-      "learning_rate":
-      "loss": 0.
+      "learning_rate": 8.812537673297168e-06,
+      "loss": 0.2618,
       "step": 400
     },
     {
       "epoch": 1.0,
-      "eval_f1": 0.
-      "eval_loss": 0.
-      "eval_runtime":
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "eval_f1": 0.8314353647853836,
+      "eval_loss": 0.23568011820316315,
+      "eval_runtime": 3.3132,
+      "eval_samples_per_second": 1187.067,
+      "eval_steps_per_second": 18.713,
       "step": 553
     },
     {
       "epoch": 1.08,
-      "learning_rate": 8.
-      "loss": 0.
+      "learning_rate": 8.212778782399036e-06,
+      "loss": 0.2304,
       "step": 600
     },
     {
       "epoch": 1.45,
-      "learning_rate":
-      "loss": 0.
+      "learning_rate": 7.613019891500905e-06,
+      "loss": 0.2153,
       "step": 800
     },
     {
       "epoch": 1.81,
-      "learning_rate":
-      "loss": 0.
+      "learning_rate": 7.010247136829416e-06,
+      "loss": 0.2025,
       "step": 1000
     },
     {
       "epoch": 2.0,
-      "eval_f1": 0.
-      "eval_loss": 0.
-      "eval_runtime":
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "eval_f1": 0.8660783804827408,
+      "eval_loss": 0.22088249027729034,
+      "eval_runtime": 3.1453,
+      "eval_samples_per_second": 1250.451,
+      "eval_steps_per_second": 19.712,
       "step": 1106
     },
     {
       "epoch": 2.17,
-      "learning_rate":
-      "loss": 0.
+      "learning_rate": 6.407474382157926e-06,
+      "loss": 0.1936,
       "step": 1200
     },
     {
       "epoch": 2.53,
-      "learning_rate":
-      "loss": 0.
+      "learning_rate": 5.804701627486437e-06,
+      "loss": 0.179,
       "step": 1400
     },
     {
       "epoch": 2.89,
-      "learning_rate":
-      "loss": 0.
+      "learning_rate": 5.20192887281495e-06,
+      "loss": 0.186,
       "step": 1600
     },
     {
       "epoch": 3.0,
-      "eval_f1": 0.
-      "eval_loss": 0.
-      "eval_runtime":
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "eval_f1": 0.8587556125721616,
+      "eval_loss": 0.20751279592514038,
+      "eval_runtime": 3.1566,
+      "eval_samples_per_second": 1245.944,
+      "eval_steps_per_second": 19.641,
       "step": 1659
     },
     {
       "epoch": 3.25,
-      "learning_rate":
-      "loss": 0.
+      "learning_rate": 4.5991561181434605e-06,
+      "loss": 0.1695,
       "step": 1800
     },
     {
       "epoch": 3.62,
-      "learning_rate":
-      "loss": 0.
+      "learning_rate": 3.996383363471971e-06,
+      "loss": 0.1578,
       "step": 2000
     },
     {
       "epoch": 3.98,
-      "learning_rate":
-      "loss": 0.
+      "learning_rate": 3.393610608800483e-06,
+      "loss": 0.162,
       "step": 2200
     },
     {
       "epoch": 4.0,
-      "eval_f1": 0.
-      "eval_loss": 0.
-      "eval_runtime":
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "eval_f1": 0.8609391124871001,
+      "eval_loss": 0.22340959310531616,
+      "eval_runtime": 3.171,
+      "eval_samples_per_second": 1240.321,
+      "eval_steps_per_second": 19.552,
       "step": 2212
     },
     {
       "epoch": 4.34,
-      "learning_rate":
-      "loss": 0.
+      "learning_rate": 2.7908378541289937e-06,
+      "loss": 0.1512,
       "step": 2400
     },
     {
       "epoch": 4.7,
-      "learning_rate":
-      "loss": 0.
+      "learning_rate": 2.188065099457505e-06,
+      "loss": 0.1428,
       "step": 2600
     },
     {
       "epoch": 5.0,
-      "eval_f1": 0.
-      "eval_loss": 0.
-      "eval_runtime":
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "eval_f1": 0.8699831146902196,
+      "eval_loss": 0.2233397215604782,
+      "eval_runtime": 3.2963,
+      "eval_samples_per_second": 1193.162,
+      "eval_steps_per_second": 18.809,
       "step": 2765
     },
     {
       "epoch": 5.06,
-      "learning_rate":
-      "loss": 0.
+      "learning_rate": 1.5852923447860157e-06,
+      "loss": 0.148,
       "step": 2800
     },
     {
       "epoch": 5.42,
-      "learning_rate":
-      "loss": 0.
+      "learning_rate": 9.82519590114527e-07,
+      "loss": 0.1353,
       "step": 3000
     },
     {
       "epoch": 5.79,
-      "learning_rate":
-      "loss": 0.
+      "learning_rate": 3.79746835443038e-07,
+      "loss": 0.1328,
       "step": 3200
     },
     {
       "epoch": 6.0,
-      "eval_f1": 0.
-      "eval_loss": 0.
-      "eval_runtime":
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "eval_f1": 0.8688311688311688,
+      "eval_loss": 0.22363461554050446,
+      "eval_runtime": 3.2118,
+      "eval_samples_per_second": 1224.552,
+      "eval_steps_per_second": 19.304,
       "step": 3318
     },
     {
-      "epoch": 6.
-      "learning_rate":
-      "loss":
-      "step":
-    },
-    {
-      "epoch":
-      "learning_rate": 3.499095840867993e-06,
-      "loss": 0.1156,
-      "step": 3600
-    },
-    {
-      "epoch": 6.87,
-      "learning_rate": 3.1374321880650997e-06,
-      "loss": 0.1171,
-      "step": 3800
-    },
-    {
-      "epoch": 7.0,
-      "eval_f1": 0.8627246678822611,
-      "eval_loss": 0.25215205550193787,
-      "eval_runtime": 1.24,
-      "eval_samples_per_second": 3171.812,
-      "eval_steps_per_second": 50.001,
-      "step": 3871
-    },
-    {
-      "epoch": 7.23,
-      "learning_rate": 2.7757685352622067e-06,
-      "loss": 0.1142,
-      "step": 4000
-    },
-    {
-      "epoch": 7.59,
-      "learning_rate": 2.414104882459313e-06,
-      "loss": 0.1077,
-      "step": 4200
-    },
-    {
-      "epoch": 7.96,
-      "learning_rate": 2.0524412296564196e-06,
-      "loss": 0.1047,
-      "step": 4400
-    },
-    {
-      "epoch": 8.0,
-      "eval_f1": 0.8664757938573661,
-      "eval_loss": 0.2702699601650238,
-      "eval_runtime": 1.241,
-      "eval_samples_per_second": 3169.155,
-      "eval_steps_per_second": 49.959,
-      "step": 4424
-    },
-    {
-      "epoch": 8.32,
-      "learning_rate": 1.6907775768535265e-06,
-      "loss": 0.1065,
-      "step": 4600
-    },
-    {
-      "epoch": 8.68,
-      "learning_rate": 1.3291139240506329e-06,
-      "loss": 0.0955,
-      "step": 4800
-    },
-    {
-      "epoch": 9.0,
-      "eval_f1": 0.863843648208469,
-      "eval_loss": 0.29335275292396545,
-      "eval_runtime": 1.2406,
-      "eval_samples_per_second": 3170.163,
-      "eval_steps_per_second": 49.975,
-      "step": 4977
-    },
-    {
-      "epoch": 9.04,
-      "learning_rate": 9.674502712477397e-07,
-      "loss": 0.0943,
-      "step": 5000
-    },
-    {
-      "epoch": 9.4,
-      "learning_rate": 6.057866184448463e-07,
-      "loss": 0.0954,
-      "step": 5200
-    },
-    {
-      "epoch": 9.76,
-      "learning_rate": 2.44122965641953e-07,
-      "loss": 0.0856,
-      "step": 5400
-    },
-    {
-      "epoch": 10.0,
-      "eval_f1": 0.8667100977198697,
-      "eval_loss": 0.31616073846817017,
-      "eval_runtime": 1.2411,
-      "eval_samples_per_second": 3168.901,
-      "eval_steps_per_second": 49.955,
-      "step": 5530
-    },
-    {
-      "epoch": 10.0,
-      "step": 5530,
-      "total_flos": 2.167962002090455e+16,
-      "train_loss": 0.1538462488819419,
-      "train_runtime": 909.5125,
-      "train_samples_per_second": 389.121,
-      "train_steps_per_second": 6.08
+      "epoch": 6.0,
+      "step": 3318,
+      "total_flos": 1.2966767506908096e+16,
+      "train_loss": 0.19685660219106277,
+      "train_runtime": 1032.5628,
+      "train_samples_per_second": 205.649,
+      "train_steps_per_second": 3.213
     }
   ],
-  "max_steps":
-  "num_train_epochs":
-  "total_flos":
+  "max_steps": 3318,
+  "num_train_epochs": 6,
+  "total_flos": 1.2966767506908096e+16,
   "trial_name": null,
   "trial_params": null
 }
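The step counts in the new trainer state also pin down the effective batch size, which this excerpt of the card's hyperparameter list does not show. A small check; the batch size of 64 is an inference from the logged numbers, not a value stated anywhere in the diff:

```python
# Inferred, not stated in the diff: 553 optimizer steps per epoch over
# 35391 training samples implies an effective batch size of 64.
import math

steps_per_epoch = 3318 / 6        # 553.0, from global_step / num_train_epochs
print(math.ceil(35391 / 553))     # 64, and ceil(35391 / 64) == 553 steps per epoch
```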
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:081fd3b588506f4bddd3baf10a7d8ef8494d111ef8adba3e7983c033d9d05446
 size 4027