haibotamiu committed
Commit
2b973dc
1 Parent(s): 276c812

End of training

README.md CHANGED
@@ -2,7 +2,10 @@
 license: apache-2.0
 base_model: bert-base-uncased
 tags:
+- question-answering
 - generated_from_trainer
+datasets:
+- squad
 model-index:
 - name: out
   results: []
@@ -13,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # out
 
-This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the None dataset.
+This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset.
 
 ## Model description
 
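Since the updated card now tags this checkpoint for question answering on SQuAD, a minimal usage sketch is shown below. It assumes the trained weights sit in a local `out/` directory (matching the model-index name); substitute the actual Hub repo id if loading from the Hub.

```python
# Minimal sketch: query the fine-tuned checkpoint for extractive QA.
# Assumption: the trained weights live in a local "out/" directory
# (the model-index name above); use the Hub repo id instead if applicable.
from transformers import pipeline

qa = pipeline("question-answering", model="./out", tokenizer="./out")

result = qa(
    question="What dataset was the model fine-tuned on?",
    context="This model is a fine-tuned version of bert-base-uncased on the squad dataset.",
)
print(result)  # {'score': ..., 'start': ..., 'end': ..., 'answer': 'squad'}
```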
 
all_results.json CHANGED
@@ -1,11 +1,11 @@
 {
-    "epoch": 100.0,
+    "epoch": 3.0,
     "eval_samples": 5000,
-    "exact_match": 47.48,
-    "f1": 58.93372368623401,
-    "train_loss": 0.13472519155520543,
-    "train_runtime": 4354.3545,
+    "exact_match": 61.92,
+    "f1": 71.73903036462997,
+    "train_loss": 1.3744957796327628,
+    "train_runtime": 129.2666,
     "train_samples": 5000,
-    "train_samples_per_second": 114.828,
-    "train_steps_per_second": 3.606
+    "train_samples_per_second": 116.039,
+    "train_steps_per_second": 3.644
 }
args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5336760664f52d4b5a64dfb406b2edee6180b9238268ff0ac085b8718d6625ad
+oid sha256:4f40dc6c969dc97f99d4457518802ea3fd3849b9127d7763d1d0cab49f0af0fd
 size 5652
eval_nbest_predictions.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:378bbb9f262a7610ac13df6f3b8b258df6f4de69b201dd4d6653c9ea26acef7d
-size 23112866
+oid sha256:6ea2f532462c760f6c27d21337b3cabd1ecdf0f4bd44aa85cf898e6b8ef4aa21
+size 22776039
eval_predictions.json CHANGED
The diff for this file is too large to render. See raw diff
 
eval_results.json CHANGED
@@ -1,6 +1,6 @@
 {
-    "epoch": 100.0,
+    "epoch": 3.0,
     "eval_samples": 5000,
-    "exact_match": 47.48,
-    "f1": 58.93372368623401
+    "exact_match": 61.92,
+    "f1": 71.73903036462997
 }
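The exact_match and f1 values above are the standard SQuAD v1.1 metrics. A sketch of how they could be recomputed from eval_predictions.json with the `evaluate` library, assuming that file maps question ids to predicted answer strings (the layout written by the stock run_qa.py example script):

```python
# Sketch: recompute SQuAD v1.1 exact-match / F1 from the saved predictions.
# Assumption: eval_predictions.json is an {id: answer_text} mapping and the
# 5,000 evaluated examples come from the SQuAD validation split.
import json

import evaluate
from datasets import load_dataset

squad_metric = evaluate.load("squad")
validation = load_dataset("squad", split="validation")

with open("out/eval_predictions.json") as f:
    raw_preds = json.load(f)

predictions = [
    {"id": qid, "prediction_text": text} for qid, text in raw_preds.items()
]
references = [
    {"id": ex["id"], "answers": ex["answers"]}
    for ex in validation
    if ex["id"] in raw_preds
]

print(squad_metric.compute(predictions=predictions, references=references))
# expected to match eval_results.json: {'exact_match': 61.92, 'f1': 71.739...}
```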
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-    "epoch": 100.0,
-    "train_loss": 0.13472519155520543,
-    "train_runtime": 4354.3545,
+    "epoch": 3.0,
+    "train_loss": 1.3744957796327628,
+    "train_runtime": 129.2666,
     "train_samples": 5000,
-    "train_samples_per_second": 114.828,
-    "train_steps_per_second": 3.606
+    "train_samples_per_second": 116.039,
+    "train_steps_per_second": 3.644
 }
trainer_state.json CHANGED
@@ -1,214 +1,28 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 100.0,
+  "epoch": 3.0,
   "eval_steps": 500,
-  "global_step": 15700,
+  "global_step": 471,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 3.18,
-      "learning_rate": 4.840764331210191e-05,
-      "loss": 1.2813,
-      "step": 500
-    },
-    {
-      "epoch": 6.37,
-      "learning_rate": 4.681528662420383e-05,
-      "loss": 0.2415,
-      "step": 1000
-    },
-    {
-      "epoch": 9.55,
-      "learning_rate": 4.522292993630574e-05,
-      "loss": 0.1252,
-      "step": 1500
-    },
-    {
-      "epoch": 12.74,
-      "learning_rate": 4.3630573248407646e-05,
-      "loss": 0.0815,
-      "step": 2000
-    },
-    {
-      "epoch": 15.92,
-      "learning_rate": 4.2038216560509556e-05,
-      "loss": 0.0684,
-      "step": 2500
-    },
-    {
-      "epoch": 19.11,
-      "learning_rate": 4.044585987261147e-05,
-      "loss": 0.0612,
-      "step": 3000
-    },
-    {
-      "epoch": 22.29,
-      "learning_rate": 3.885350318471338e-05,
-      "loss": 0.0629,
-      "step": 3500
-    },
-    {
-      "epoch": 25.48,
-      "learning_rate": 3.7261146496815283e-05,
-      "loss": 0.0734,
-      "step": 4000
-    },
-    {
-      "epoch": 28.66,
-      "learning_rate": 3.56687898089172e-05,
-      "loss": 0.0968,
-      "step": 4500
-    },
-    {
-      "epoch": 31.85,
-      "learning_rate": 3.407643312101911e-05,
-      "loss": 0.1412,
-      "step": 5000
-    },
-    {
-      "epoch": 35.03,
-      "learning_rate": 3.248407643312102e-05,
-      "loss": 0.2021,
-      "step": 5500
-    },
-    {
-      "epoch": 38.22,
-      "learning_rate": 3.089171974522293e-05,
-      "loss": 0.2834,
-      "step": 6000
-    },
-    {
-      "epoch": 41.4,
-      "learning_rate": 2.929936305732484e-05,
-      "loss": 0.3706,
-      "step": 6500
-    },
-    {
-      "epoch": 44.59,
-      "learning_rate": 2.7707006369426753e-05,
-      "loss": 0.3453,
-      "step": 7000
-    },
-    {
-      "epoch": 47.77,
-      "learning_rate": 2.6114649681528662e-05,
-      "loss": 0.2912,
-      "step": 7500
-    },
-    {
-      "epoch": 50.96,
-      "learning_rate": 2.4522292993630575e-05,
-      "loss": 0.2182,
-      "step": 8000
-    },
-    {
-      "epoch": 54.14,
-      "learning_rate": 2.2929936305732484e-05,
-      "loss": 0.1055,
-      "step": 8500
-    },
-    {
-      "epoch": 57.32,
-      "learning_rate": 2.1337579617834397e-05,
-      "loss": 0.0526,
-      "step": 9000
-    },
-    {
-      "epoch": 60.51,
-      "learning_rate": 1.974522292993631e-05,
-      "loss": 0.03,
-      "step": 9500
-    },
-    {
-      "epoch": 63.69,
-      "learning_rate": 1.8152866242038215e-05,
-      "loss": 0.0182,
-      "step": 10000
-    },
-    {
-      "epoch": 66.88,
-      "learning_rate": 1.6560509554140128e-05,
-      "loss": 0.0138,
-      "step": 10500
-    },
-    {
-      "epoch": 70.06,
-      "learning_rate": 1.4968152866242039e-05,
-      "loss": 0.0105,
-      "step": 11000
-    },
-    {
-      "epoch": 73.25,
-      "learning_rate": 1.337579617834395e-05,
-      "loss": 0.0113,
-      "step": 11500
-    },
-    {
-      "epoch": 76.43,
-      "learning_rate": 1.178343949044586e-05,
-      "loss": 0.0086,
-      "step": 12000
-    },
-    {
-      "epoch": 79.62,
-      "learning_rate": 1.0191082802547772e-05,
-      "loss": 0.0073,
-      "step": 12500
-    },
-    {
-      "epoch": 82.8,
-      "learning_rate": 8.598726114649681e-06,
-      "loss": 0.007,
-      "step": 13000
-    },
-    {
-      "epoch": 85.99,
-      "learning_rate": 7.006369426751593e-06,
-      "loss": 0.0049,
-      "step": 13500
-    },
-    {
-      "epoch": 89.17,
-      "learning_rate": 5.414012738853504e-06,
-      "loss": 0.0047,
-      "step": 14000
-    },
-    {
-      "epoch": 92.36,
-      "learning_rate": 3.821656050955414e-06,
-      "loss": 0.0031,
-      "step": 14500
-    },
-    {
-      "epoch": 95.54,
-      "learning_rate": 2.229299363057325e-06,
-      "loss": 0.0043,
-      "step": 15000
-    },
-    {
-      "epoch": 98.73,
-      "learning_rate": 6.369426751592357e-07,
-      "loss": 0.0035,
-      "step": 15500
-    },
-    {
-      "epoch": 100.0,
-      "step": 15700,
-      "total_flos": 9.7986283776e+16,
-      "train_loss": 0.13472519155520543,
-      "train_runtime": 4354.3545,
-      "train_samples_per_second": 114.828,
-      "train_steps_per_second": 3.606
+      "epoch": 3.0,
+      "step": 471,
+      "total_flos": 2939588513280000.0,
+      "train_loss": 1.3744957796327628,
+      "train_runtime": 129.2666,
+      "train_samples_per_second": 116.039,
+      "train_steps_per_second": 3.644
     }
   ],
   "logging_steps": 500,
-  "max_steps": 15700,
-  "num_train_epochs": 100,
+  "max_steps": 471,
+  "num_train_epochs": 3,
   "save_steps": 500,
-  "total_flos": 9.7986283776e+16,
+  "total_flos": 2939588513280000.0,
   "trial_name": null,
   "trial_params": null
 }
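The drop in global_step from 15700 to 471 is consistent with the retrained run: 5,000 training samples for 3 epochs at an effective batch size of 32 (an assumption, since args.bin is a binary LFS blob; e.g. a per-device batch size of 8 on 4 devices, or 32 on one). A quick check:

```python
# Sanity check of the step counts in trainer_state.json.
# Assumption: effective train batch size of 32 (not recoverable from args.bin here).
import math

train_samples = 5000        # "train_samples" in train_results.json
effective_batch_size = 32   # assumed
steps_per_epoch = math.ceil(train_samples / effective_batch_size)  # 157

print(steps_per_epoch * 3)    # 471   -> new "global_step" / "max_steps"
print(steps_per_epoch * 100)  # 15700 -> old "global_step" / "max_steps"
```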