haibotamiu committed on
Commit 61cf006
1 Parent(s): c886c86

End of training

README.md CHANGED
@@ -2,7 +2,10 @@
 license: apache-2.0
 base_model: bert-base-uncased
 tags:
+- question-answering
 - generated_from_trainer
+datasets:
+- squad
 model-index:
 - name: out
   results: []
@@ -13,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # out
 
-This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the None dataset.
+This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset.
 
 ## Model description
 
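The README change tags the checkpoint for question answering and records SQuAD as the training data. For reference, a minimal usage sketch with the transformers pipeline follows; the repo id "haibotamiu/out" is only an assumption pieced together from the commit author and the model-index name, and the question/context strings are invented.

# A minimal sketch, assuming the checkpoint is published as "haibotamiu/out" (hypothetical repo id)
from transformers import pipeline

qa = pipeline("question-answering", model="haibotamiu/out")  # loads the fine-tuned BERT QA head
result = qa(
    question="What dataset was the model fine-tuned on?",
    context="This model is a fine-tuned version of bert-base-uncased on the squad dataset.",
)
print(result["answer"], result["score"])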
all_results.json CHANGED
@@ -1,11 +1,11 @@
 {
-    "epoch": 50.0,
+    "epoch": 3.0,
     "eval_samples": 5000,
-    "exact_match": 59.0,
-    "f1": 68.40149035187889,
-    "train_loss": 0.14310269137856307,
-    "train_runtime": 1143.4603,
+    "exact_match": 41.04,
+    "f1": 52.204443710543785,
+    "train_loss": 3.0070706685384114,
+    "train_runtime": 122.9618,
     "train_samples": 5000,
-    "train_samples_per_second": 218.635,
-    "train_steps_per_second": 6.865
+    "train_samples_per_second": 121.989,
+    "train_steps_per_second": 0.488
 }
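The updated numbers are internally consistent. A quick sanity-check sketch of the arithmetic, using only values that appear in this commit (the ~250 examples-per-optimizer-step figure is inferred, not stated anywhere in the diff):

# Sanity check on the new run's bookkeeping (values from all_results.json / trainer_state.json)
epochs = 3
train_samples = 5000
global_step = 60
samples_per_second = 121.989
steps_per_second = 0.488

examples_per_step = epochs * train_samples / global_step        # -> 250.0 (inferred effective batch)
throughput_ratio = samples_per_second / steps_per_second        # -> ~250, consistent with the above
runtime_estimate = epochs * train_samples / samples_per_second  # -> ~122.96 s, matches train_runtime
print(examples_per_step, throughput_ratio, runtime_estimate)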
args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5fbec49d09f7ae6605a5933a16596aedf0c32832eacf337a55178dfe0c615d82
-size 5588
+oid sha256:16a8faeb17d6700282e0433bd20d12f060b5026dc3729080a6f3feb4a612936d
+size 5652
eval_nbest_predictions.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1b96b2d9c27fcb5a811d5835f706170a58ac14efb3a26b03737cd5f0737caab1
-size 22976135
+oid sha256:680db1c5e66e56840b3d9031e79bd2145f82728d53811cb3876e4c00eb74e5b3
+size 22841996
eval_predictions.json CHANGED
The diff for this file is too large to render. See raw diff
 
eval_results.json CHANGED
@@ -1,6 +1,6 @@
 {
-    "epoch": 50.0,
+    "epoch": 3.0,
     "eval_samples": 5000,
-    "exact_match": 59.0,
-    "f1": 68.40149035187889
+    "exact_match": 41.04,
+    "f1": 52.204443710543785
 }
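exact_match and f1 here follow the standard SQuAD metric definition. A minimal sketch of computing such scores with the evaluate library is below; the prediction/reference pair is invented for illustration and is not taken from this run.

# A sketch, assuming the standard SQuAD metric from the evaluate library; the sample data is made up
import evaluate

squad_metric = evaluate.load("squad")
predictions = [{"id": "example-1", "prediction_text": "Denver Broncos"}]
references = [{"id": "example-1",
               "answers": {"text": ["Denver Broncos"], "answer_start": [177]}}]
print(squad_metric.compute(predictions=predictions, references=references))
# -> {'exact_match': 100.0, 'f1': 100.0}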
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-    "epoch": 50.0,
-    "train_loss": 0.14310269137856307,
-    "train_runtime": 1143.4603,
+    "epoch": 3.0,
+    "train_loss": 3.0070706685384114,
+    "train_runtime": 122.9618,
     "train_samples": 5000,
-    "train_samples_per_second": 218.635,
-    "train_steps_per_second": 6.865
+    "train_samples_per_second": 121.989,
+    "train_steps_per_second": 0.488
 }
trainer_state.json CHANGED
@@ -1,118 +1,28 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 50.0,
+  "epoch": 3.0,
   "eval_steps": 500,
-  "global_step": 7850,
+  "global_step": 60,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 3.18,
-      "learning_rate": 4.681528662420383e-05,
-      "loss": 1.5828,
-      "step": 500
-    },
-    {
-      "epoch": 6.37,
-      "learning_rate": 4.3630573248407646e-05,
-      "loss": 0.2954,
-      "step": 1000
-    },
-    {
-      "epoch": 9.55,
-      "learning_rate": 4.044585987261147e-05,
-      "loss": 0.123,
-      "step": 1500
-    },
-    {
-      "epoch": 12.74,
-      "learning_rate": 3.7261146496815283e-05,
-      "loss": 0.0777,
-      "step": 2000
-    },
-    {
-      "epoch": 15.92,
-      "learning_rate": 3.407643312101911e-05,
-      "loss": 0.0501,
-      "step": 2500
-    },
-    {
-      "epoch": 19.11,
-      "learning_rate": 3.089171974522293e-05,
-      "loss": 0.0339,
-      "step": 3000
-    },
-    {
-      "epoch": 22.29,
-      "learning_rate": 2.7707006369426753e-05,
-      "loss": 0.0237,
-      "step": 3500
-    },
-    {
-      "epoch": 25.48,
-      "learning_rate": 2.4522292993630575e-05,
-      "loss": 0.0156,
-      "step": 4000
-    },
-    {
-      "epoch": 28.66,
-      "learning_rate": 2.1337579617834397e-05,
-      "loss": 0.0123,
-      "step": 4500
-    },
-    {
-      "epoch": 31.85,
-      "learning_rate": 1.8152866242038215e-05,
-      "loss": 0.0092,
-      "step": 5000
-    },
-    {
-      "epoch": 35.03,
-      "learning_rate": 1.4968152866242039e-05,
-      "loss": 0.0065,
-      "step": 5500
-    },
-    {
-      "epoch": 38.22,
-      "learning_rate": 1.178343949044586e-05,
-      "loss": 0.0057,
-      "step": 6000
-    },
-    {
-      "epoch": 41.4,
-      "learning_rate": 8.598726114649681e-06,
-      "loss": 0.0035,
-      "step": 6500
-    },
-    {
-      "epoch": 44.59,
-      "learning_rate": 5.414012738853504e-06,
-      "loss": 0.0034,
-      "step": 7000
-    },
-    {
-      "epoch": 47.77,
-      "learning_rate": 2.229299363057325e-06,
-      "loss": 0.0024,
-      "step": 7500
-    },
-    {
-      "epoch": 50.0,
-      "step": 7850,
-      "total_flos": 2.4497456256e+16,
-      "train_loss": 0.14310269137856307,
-      "train_runtime": 1143.4603,
-      "train_samples_per_second": 218.635,
-      "train_steps_per_second": 6.865
+      "epoch": 3.0,
+      "step": 60,
+      "total_flos": 2939588513280000.0,
+      "train_loss": 3.0070706685384114,
+      "train_runtime": 122.9618,
+      "train_samples_per_second": 121.989,
+      "train_steps_per_second": 0.488
     }
   ],
   "logging_steps": 500,
-  "max_steps": 7850,
-  "num_train_epochs": 50,
+  "max_steps": 60,
+  "num_train_epochs": 3,
   "save_steps": 500,
-  "total_flos": 2.4497456256e+16,
+  "total_flos": 2939588513280000.0,
   "trial_name": null,
   "trial_params": null
 }
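Because the new max_steps (60) is below logging_steps (500), log_history now holds only the final training summary rather than periodic loss entries. A small sketch for inspecting the updated file, assuming a local copy of trainer_state.json:

# A sketch for inspecting the new trainer state; the local file path is assumed
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# No intermediate losses were logged (max_steps < logging_steps), so this prints a single entry
print(state["global_step"], state["num_train_epochs"])
for entry in state["log_history"]:
    print(entry)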