haibotamiu committed
Commit 65b86d5
1 Parent(s): ad4d51d

End of training

README.md CHANGED
@@ -2,7 +2,10 @@
 license: apache-2.0
 base_model: distilbert-base-uncased
 tags:
+- question-answering
 - generated_from_trainer
+datasets:
+- squad
 model-index:
 - name: out
   results: []
@@ -13,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # out
 
-This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset.
+This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad dataset.
 
 ## Model description
 
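With the README now tagging the checkpoint for question answering on squad, it can be queried through the standard `transformers` question-answering pipeline. A minimal usage sketch follows; the repository id `haibotamiu/out` is an assumption pieced together from the commit author and the model name `out`, not something stated in this diff, and the question/context strings are invented for illustration.

```python
# Minimal sketch: querying the fine-tuned checkpoint with the QA pipeline.
# NOTE: the repo id below is assumed, not taken from this commit.
from transformers import pipeline

qa = pipeline("question-answering", model="haibotamiu/out")

result = qa(
    question="What was the base model?",
    context="The model was fine-tuned from distilbert-base-uncased on the SQuAD dataset.",
)
print(result)  # {'score': ..., 'start': ..., 'end': ..., 'answer': 'distilbert-base-uncased'}
```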
all_results.json CHANGED
@@ -1,11 +1,11 @@
 {
-    "epoch": 20.0,
+    "epoch": 50.0,
     "eval_samples": 10784,
-    "exact_match": 46.07379375591296,
-    "f1": 59.615253741406285,
-    "train_loss": 0.9091191572950066,
-    "train_runtime": 4840.8355,
+    "exact_match": 65.24124881740775,
+    "f1": 76.54434353401918,
+    "train_loss": 0.4313709532456591,
+    "train_runtime": 11982.9489,
     "train_samples": 88524,
-    "train_samples_per_second": 365.739,
-    "train_steps_per_second": 1.43
+    "train_samples_per_second": 369.375,
+    "train_steps_per_second": 1.444
 }
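The updated aggregate figures are internally consistent: 88,524 training samples over 50 epochs in 11,982.9 seconds gives the reported throughput. A quick arithmetic check, using only values from this commit's all_results.json and trainer_state.json:

```python
# Sanity check of the throughput fields reported in all_results.json.
epochs = 50.0
train_samples = 88524
train_runtime = 11982.9489  # seconds
global_step = 17300         # total optimizer steps, from trainer_state.json

print(round(epochs * train_samples / train_runtime, 3))  # 369.375 == train_samples_per_second
print(round(global_step / train_runtime, 3))             # 1.444   == train_steps_per_second
```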
args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:26b9976813d721358fcb7d0da6a5d5e6dc67432507a8e9890fbc48828c18e7c5
+oid sha256:fe7a208cb9c63df7b3fad01ad465970877eff6a0fd43b695a27a3c8dad001aee
 size 5652
eval_nbest_predictions.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3876c71c4d0c1b62abfa24ba0e3b9860114e49702643e1bc29d684bd13fec948
-size 50977373
+oid sha256:e0cb327220f3cfb972843de31afd7b923afb2a443cc2d6cf2840e2f13217887d
+size 49096744
eval_predictions.json CHANGED
The diff for this file is too large to render. See raw diff
 
eval_results.json CHANGED
@@ -1,6 +1,6 @@
 {
-    "epoch": 20.0,
+    "epoch": 50.0,
     "eval_samples": 10784,
-    "exact_match": 46.07379375591296,
-    "f1": 59.615253741406285
+    "exact_match": 65.24124881740775,
+    "f1": 76.54434353401918
 }
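exact_match and f1 here are the standard SQuAD evaluation metrics, scored over the answers written to eval_predictions.json. As a rough sketch of how such numbers are usually reproduced with the `evaluate` library (not necessarily the exact code path used for this run; the prediction/reference pair below is made up):

```python
# Illustrative only: SQuAD exact_match/F1 for a single invented example.
import evaluate

squad = evaluate.load("squad")

predictions = [{"id": "q0", "prediction_text": "Denver Broncos"}]
references = [{"id": "q0", "answers": {"text": ["Denver Broncos"], "answer_start": [0]}}]

print(squad.compute(predictions=predictions, references=references))
# -> {'exact_match': 100.0, 'f1': 100.0}
```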
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-    "epoch": 20.0,
-    "train_loss": 0.9091191572950066,
-    "train_runtime": 4840.8355,
+    "epoch": 50.0,
+    "train_loss": 0.4313709532456591,
+    "train_runtime": 11982.9489,
     "train_samples": 88524,
-    "train_samples_per_second": 365.739,
-    "train_steps_per_second": 1.43
+    "train_samples_per_second": 369.375,
+    "train_steps_per_second": 1.444
 }
trainer_state.json CHANGED
@@ -1,106 +1,232 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 20.0,
+  "epoch": 50.0,
   "eval_steps": 500,
-  "global_step": 6920,
+  "global_step": 17300,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
       "epoch": 1.45,
-      "learning_rate": 4.6387283236994224e-05,
-      "loss": 1.6616,
+      "learning_rate": 4.855491329479769e-05,
+      "loss": 1.6594,
       "step": 500
     },
     {
       "epoch": 2.89,
-      "learning_rate": 4.2774566473988445e-05,
-      "loss": 0.9259,
+      "learning_rate": 4.710982658959538e-05,
+      "loss": 0.9215,
       "step": 1000
     },
     {
       "epoch": 4.34,
-      "learning_rate": 3.916184971098266e-05,
-      "loss": 0.6562,
+      "learning_rate": 4.566473988439307e-05,
+      "loss": 0.6453,
       "step": 1500
     },
     {
       "epoch": 5.78,
-      "learning_rate": 3.554913294797688e-05,
-      "loss": 0.4915,
+      "learning_rate": 4.421965317919075e-05,
+      "loss": 0.4758,
       "step": 2000
     },
     {
       "epoch": 7.23,
-      "learning_rate": 3.19364161849711e-05,
-      "loss": 0.3773,
+      "learning_rate": 4.2774566473988445e-05,
+      "loss": 0.3575,
       "step": 2500
     },
     {
       "epoch": 8.67,
-      "learning_rate": 2.832369942196532e-05,
-      "loss": 0.3321,
+      "learning_rate": 4.132947976878613e-05,
+      "loss": 0.3063,
       "step": 3000
     },
     {
       "epoch": 10.12,
-      "learning_rate": 2.471098265895954e-05,
-      "loss": 0.347,
+      "learning_rate": 3.988439306358382e-05,
+      "loss": 0.31,
       "step": 3500
     },
     {
       "epoch": 11.56,
-      "learning_rate": 2.1098265895953757e-05,
-      "loss": 0.3954,
+      "learning_rate": 3.84393063583815e-05,
+      "loss": 0.3325,
       "step": 4000
     },
     {
       "epoch": 13.01,
-      "learning_rate": 1.748554913294798e-05,
-      "loss": 0.5166,
+      "learning_rate": 3.699421965317919e-05,
+      "loss": 0.414,
       "step": 4500
     },
     {
       "epoch": 14.45,
-      "learning_rate": 1.3872832369942197e-05,
-      "loss": 0.6966,
+      "learning_rate": 3.554913294797688e-05,
+      "loss": 0.4969,
       "step": 5000
     },
     {
       "epoch": 15.9,
-      "learning_rate": 1.0260115606936416e-05,
-      "loss": 1.072,
+      "learning_rate": 3.410404624277457e-05,
+      "loss": 0.668,
       "step": 5500
     },
     {
       "epoch": 17.34,
-      "learning_rate": 6.647398843930635e-06,
-      "loss": 1.4468,
+      "learning_rate": 3.265895953757225e-05,
+      "loss": 0.7799,
       "step": 6000
     },
     {
       "epoch": 18.79,
-      "learning_rate": 3.0346820809248555e-06,
-      "loss": 1.8735,
+      "learning_rate": 3.1213872832369946e-05,
+      "loss": 0.8613,
       "step": 6500
     },
     {
-      "epoch": 20.0,
-      "step": 6920,
-      "total_flos": 1.7348902540849152e+17,
-      "train_loss": 0.9091191572950066,
-      "train_runtime": 4840.8355,
-      "train_samples_per_second": 365.739,
-      "train_steps_per_second": 1.43
+      "epoch": 20.23,
+      "learning_rate": 2.9768786127167632e-05,
+      "loss": 0.8462,
+      "step": 7000
+    },
+    {
+      "epoch": 21.68,
+      "learning_rate": 2.832369942196532e-05,
+      "loss": 0.7924,
+      "step": 7500
+    },
+    {
+      "epoch": 23.12,
+      "learning_rate": 2.6878612716763007e-05,
+      "loss": 0.7011,
+      "step": 8000
+    },
+    {
+      "epoch": 24.57,
+      "learning_rate": 2.5433526011560693e-05,
+      "loss": 0.5652,
+      "step": 8500
+    },
+    {
+      "epoch": 26.01,
+      "learning_rate": 2.3988439306358382e-05,
+      "loss": 0.4699,
+      "step": 9000
+    },
+    {
+      "epoch": 27.46,
+      "learning_rate": 2.254335260115607e-05,
+      "loss": 0.3833,
+      "step": 9500
+    },
+    {
+      "epoch": 28.9,
+      "learning_rate": 2.1098265895953757e-05,
+      "loss": 0.3351,
+      "step": 10000
+    },
+    {
+      "epoch": 30.35,
+      "learning_rate": 1.9653179190751446e-05,
+      "loss": 0.2929,
+      "step": 10500
+    },
+    {
+      "epoch": 31.79,
+      "learning_rate": 1.8208092485549132e-05,
+      "loss": 0.2636,
+      "step": 11000
+    },
+    {
+      "epoch": 33.24,
+      "learning_rate": 1.676300578034682e-05,
+      "loss": 0.2358,
+      "step": 11500
+    },
+    {
+      "epoch": 34.68,
+      "learning_rate": 1.531791907514451e-05,
+      "loss": 0.2153,
+      "step": 12000
+    },
+    {
+      "epoch": 36.13,
+      "learning_rate": 1.3872832369942197e-05,
+      "loss": 0.1957,
+      "step": 12500
+    },
+    {
+      "epoch": 37.57,
+      "learning_rate": 1.2427745664739884e-05,
+      "loss": 0.1784,
+      "step": 13000
+    },
+    {
+      "epoch": 39.02,
+      "learning_rate": 1.0982658959537573e-05,
+      "loss": 0.169,
+      "step": 13500
+    },
+    {
+      "epoch": 40.46,
+      "learning_rate": 9.53757225433526e-06,
+      "loss": 0.1565,
+      "step": 14000
+    },
+    {
+      "epoch": 41.91,
+      "learning_rate": 8.092485549132949e-06,
+      "loss": 0.1508,
+      "step": 14500
+    },
+    {
+      "epoch": 43.35,
+      "learning_rate": 6.647398843930635e-06,
+      "loss": 0.1412,
+      "step": 15000
+    },
+    {
+      "epoch": 44.8,
+      "learning_rate": 5.202312138728324e-06,
+      "loss": 0.1378,
+      "step": 15500
+    },
+    {
+      "epoch": 46.24,
+      "learning_rate": 3.757225433526012e-06,
+      "loss": 0.1329,
+      "step": 16000
+    },
+    {
+      "epoch": 47.69,
+      "learning_rate": 2.3121387283236993e-06,
+      "loss": 0.131,
+      "step": 16500
+    },
+    {
+      "epoch": 49.13,
+      "learning_rate": 8.670520231213873e-07,
+      "loss": 0.1268,
+      "step": 17000
+    },
+    {
+      "epoch": 50.0,
+      "step": 17300,
+      "total_flos": 4.337225635212288e+17,
+      "train_loss": 0.4313709532456591,
+      "train_runtime": 11982.9489,
+      "train_samples_per_second": 369.375,
+      "train_steps_per_second": 1.444
     }
   ],
   "logging_steps": 500,
-  "max_steps": 6920,
-  "num_train_epochs": 20,
+  "max_steps": 17300,
+  "num_train_epochs": 50,
   "save_steps": 500,
-  "total_flos": 1.7348902540849152e+17,
+  "total_flos": 4.337225635212288e+17,
   "trial_name": null,
   "trial_params": null
 }
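The logged learning rates in the new trainer_state.json follow a linear decay from roughly 5e-5 at step 0 down to 0 at step 17,300, with no visible warmup. Neither the peak rate nor the schedule type is stated anywhere in this commit, so the sketch below only demonstrates that this assumption reproduces the logged values:

```python
# Assumed schedule: linear decay from a 5e-5 peak to 0 over max_steps, no warmup.
max_steps = 17300
peak_lr = 5e-5  # inferred from the log, not stated in the commit

def linear_lr(step: int, peak: float = peak_lr, total: int = max_steps) -> float:
    """Learning rate after `step` optimizer steps under plain linear decay."""
    return peak * (total - step) / total

print(linear_lr(500))    # ~4.8555e-05, matches the step-500 log entry
print(linear_lr(17000))  # ~8.6705e-07, matches the step-17000 log entry
```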