haibotamiu committed
Commit 16929c2
1 Parent(s): be53342

End of training

README.md CHANGED
@@ -2,7 +2,10 @@
 license: apache-2.0
 base_model: bert-large-uncased
 tags:
+- question-answering
 - generated_from_trainer
+datasets:
+- squad
 model-index:
 - name: out
   results: []
@@ -13,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # out
 
-This model is a fine-tuned version of [bert-large-uncased](https://huggingface.co/bert-large-uncased) on the None dataset.
+This model is a fine-tuned version of [bert-large-uncased](https://huggingface.co/bert-large-uncased) on the squad dataset.
 
 ## Model description
 
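For context, the updated model card describes a SQuAD question-answering fine-tune of bert-large-uncased. A minimal usage sketch with the transformers question-answering pipeline is shown below; the repository id haibotamiu/out is an assumption inferred from the committer name and the model-index name "out" in this commit, not something stated in the diff.

```python
from transformers import pipeline

# Hypothetical hub id: "<committer>/out" is inferred from this commit, not confirmed by it.
qa = pipeline("question-answering", model="haibotamiu/out")

result = qa(
    question="What was the model fine-tuned on?",
    context="This model is a fine-tuned version of bert-large-uncased on the squad dataset.",
)
print(result["answer"], result["score"])
```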
all_results.json CHANGED
@@ -1,11 +1,11 @@
 {
-    "epoch": 50.0,
+    "epoch": 100.0,
     "eval_samples": 3000,
-    "exact_match": 68.3,
-    "f1": 77.35539073416115,
-    "train_loss": 0.1694981136669715,
-    "train_runtime": 2376.6749,
+    "exact_match": 68.86666666666666,
+    "f1": 77.76425661888325,
+    "train_loss": 0.09819502154986064,
+    "train_runtime": 4749.1078,
     "train_samples": 3000,
-    "train_samples_per_second": 63.113,
+    "train_samples_per_second": 63.17,
     "train_steps_per_second": 0.505
 }
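The updated figures are mutually consistent. A quick arithmetic check, assuming the effective batch size implied by the ratio of samples per second to steps per second (about 125, which is not logged explicitly in these files):

```python
# Consistency check on the updated all_results.json values.
train_samples = 3000          # "train_samples"
epochs = 100.0                # "epoch"
samples_per_sec = 63.17       # "train_samples_per_second"
steps_per_sec = 0.505         # "train_steps_per_second"

effective_batch = samples_per_sec / steps_per_sec        # ~125 samples per optimizer step (inferred)
total_steps = train_samples * epochs / effective_batch   # ~2400, matching global_step in trainer_state.json
runtime = train_samples * epochs / samples_per_sec       # ~4749 s, matching "train_runtime"

print(round(effective_batch), round(total_steps), round(runtime, 1))
```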
args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bce3eea016a0c429101ebfc0f9b360212928d83b1f55f34d6e88d07ae2bcb92b
+oid sha256:77d11f566704b8ea970a8b9270c4a9beee63dee776334b5e5fc069c5088dbf0c
 size 5652
eval_nbest_predictions.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6763bbcbd8b21d7e3fed94b03bacdb4881ee275f634f08a710c42512c94e46f3
-size 13811032
+oid sha256:0a5bfba2d642d62d6d3840b324816dc1169c1ca3f0e889b0b8dc530396b98917
+size 13896163
eval_predictions.json CHANGED
The diff for this file is too large to render. See raw diff
 
eval_results.json CHANGED
@@ -1,6 +1,6 @@
 {
-    "epoch": 50.0,
+    "epoch": 100.0,
     "eval_samples": 3000,
-    "exact_match": 68.3,
-    "f1": 77.35539073416115
+    "exact_match": 68.86666666666666,
+    "f1": 77.76425661888325
 }
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-    "epoch": 50.0,
-    "train_loss": 0.1694981136669715,
-    "train_runtime": 2376.6749,
+    "epoch": 100.0,
+    "train_loss": 0.09819502154986064,
+    "train_runtime": 4749.1078,
     "train_samples": 3000,
-    "train_samples_per_second": 63.113,
+    "train_samples_per_second": 63.17,
     "train_steps_per_second": 0.505
 }
trainer_state.json CHANGED
@@ -1,40 +1,52 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 50.0,
+  "epoch": 100.0,
   "eval_steps": 500,
-  "global_step": 1200,
+  "global_step": 2400,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
       "epoch": 20.83,
-      "learning_rate": 2.916666666666667e-05,
-      "loss": 0.3975,
+      "learning_rate": 3.958333333333333e-05,
+      "loss": 0.4465,
       "step": 500
     },
     {
       "epoch": 41.67,
-      "learning_rate": 8.333333333333334e-06,
-      "loss": 0.0083,
+      "learning_rate": 2.916666666666667e-05,
+      "loss": 0.0159,
       "step": 1000
     },
     {
-      "epoch": 50.0,
-      "step": 1200,
-      "total_flos": 1.044795366144e+17,
-      "train_loss": 0.1694981136669715,
-      "train_runtime": 2376.6749,
-      "train_samples_per_second": 63.113,
+      "epoch": 62.5,
+      "learning_rate": 1.8750000000000002e-05,
+      "loss": 0.005,
+      "step": 1500
+    },
+    {
+      "epoch": 83.33,
+      "learning_rate": 8.333333333333334e-06,
+      "loss": 0.0025,
+      "step": 2000
+    },
+    {
+      "epoch": 100.0,
+      "step": 2400,
+      "total_flos": 2.089590732288e+17,
+      "train_loss": 0.09819502154986064,
+      "train_runtime": 4749.1078,
+      "train_samples_per_second": 63.17,
       "train_steps_per_second": 0.505
     }
   ],
   "logging_steps": 500,
-  "max_steps": 1200,
-  "num_train_epochs": 50,
+  "max_steps": 2400,
+  "num_train_epochs": 100,
   "save_steps": 500,
-  "total_flos": 1.044795366144e+17,
+  "total_flos": 2.089590732288e+17,
   "trial_name": null,
   "trial_params": null
 }
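The new learning-rate log is consistent with a linear decay to zero over max_steps = 2400, assuming an initial learning rate of 5e-5 and no warmup (neither value is stored in this file, so both are assumptions). A small sketch reproducing the logged values:

```python
# Linear decay implied by the logged learning rates; 5e-5 and zero warmup are assumed, not logged.
initial_lr = 5e-5
max_steps = 2400

def lr_at(step: int) -> float:
    """Learning rate after `step` optimizer steps under linear decay to zero."""
    return initial_lr * (1 - step / max_steps)

for step in (500, 1000, 1500, 2000):
    print(step, lr_at(step))
# 500  -> ~3.9583e-05 (logged 3.958333333333333e-05)
# 1000 -> ~2.9167e-05 (logged 2.916666666666667e-05)
# 1500 -> ~1.8750e-05 (logged 1.8750000000000002e-05)
# 2000 -> ~8.3333e-06 (logged 8.333333333333334e-06)
```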