w05230505 committed on
Commit
6685115
1 Parent(s): e779c6d

End of training

Browse files
Files changed (6) hide show
  1. README.md +24 -6
  2. all_results.json +11 -11
  3. config.json +1 -0
  4. eval_results.json +7 -7
  5. train_results.json +4 -4
  6. trainer_state.json +4 -4
README.md CHANGED
@@ -1,15 +1,33 @@
1
  ---
2
  library_name: transformers
 
 
3
  license: apache-2.0
4
  base_model: bert-base-uncased
5
  tags:
6
  - generated_from_trainer
 
 
7
  metrics:
8
  - accuracy
9
  - f1
10
  model-index:
11
  - name: bert-base-cased-finetuned-sst2
12
- results: []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
  ---
14
 
15
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -17,12 +35,12 @@ should probably proofread and complete it, then remove this comment. -->
17
 
18
  # bert-base-cased-finetuned-sst2
19
 
20
- This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset.
21
  It achieves the following results on the evaluation set:
22
- - Accuracy: 0.9109
23
- - Combined Score: 0.8954
24
- - F1: 0.8799
25
- - Loss: 0.3585
26
 
27
  ## Model description
28
 
 
1
  ---
2
  library_name: transformers
3
+ language:
4
+ - en
5
  license: apache-2.0
6
  base_model: bert-base-uncased
7
  tags:
8
  - generated_from_trainer
9
+ datasets:
10
+ - glue
11
  metrics:
12
  - accuracy
13
  - f1
14
  model-index:
15
  - name: bert-base-cased-finetuned-sst2
16
+ results:
17
+ - task:
18
+ name: Text Classification
19
+ type: text-classification
20
+ dataset:
21
+ name: GLUE QQP
22
+ type: glue
23
+ args: qqp
24
+ metrics:
25
+ - name: Accuracy
26
+ type: accuracy
27
+ value: 0.910784071234232
28
+ - name: F1
29
+ type: f1
30
+ value: 0.8782365054180873
31
  ---
32
 
33
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 
35
 
36
  # bert-base-cased-finetuned-sst2
37
 
38
+ This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the GLUE QQP dataset.
39
  It achieves the following results on the evaluation set:
40
+ - Loss: 0.3776
41
+ - Accuracy: 0.9108
42
+ - F1: 0.8782
43
+ - Combined Score: 0.8945
44
 
45
  ## Model description
46
 
all_results.json CHANGED
@@ -1,17 +1,17 @@
1
  {
2
  "epoch": 3.0,
3
- "eval_accuracy": 0.9109077417759089,
4
- "eval_combined_score": 0.8953925089190357,
5
- "eval_f1": 0.8798772760621624,
6
- "eval_loss": 0.35853680968284607,
7
- "eval_runtime": 173.7873,
8
  "eval_samples": 40430,
9
- "eval_samples_per_second": 232.641,
10
- "eval_steps_per_second": 29.082,
11
  "total_flos": 2.8719571514554368e+17,
12
- "train_loss": 0.20698899069389173,
13
- "train_runtime": 14606.6124,
14
  "train_samples": 363846,
15
- "train_samples_per_second": 74.729,
16
- "train_steps_per_second": 4.671
17
  }
 
1
  {
2
  "epoch": 3.0,
3
+ "eval_accuracy": 0.910784071234232,
4
+ "eval_combined_score": 0.8945102883261596,
5
+ "eval_f1": 0.8782365054180873,
6
+ "eval_loss": 0.3775930106639862,
7
+ "eval_runtime": 1120.9866,
8
  "eval_samples": 40430,
9
+ "eval_samples_per_second": 36.066,
10
+ "eval_steps_per_second": 4.509,
11
  "total_flos": 2.8719571514554368e+17,
12
+ "train_loss": 0.0,
13
+ "train_runtime": 0.002,
14
  "train_samples": 363846,
15
+ "train_samples_per_second": 559413758.499,
16
+ "train_steps_per_second": 34964320.845
17
  }
config.json CHANGED
@@ -23,6 +23,7 @@
23
  "num_hidden_layers": 12,
24
  "pad_token_id": 0,
25
  "position_embedding_type": "absolute",
 
26
  "torch_dtype": "float32",
27
  "transformers_version": "4.45.0.dev0",
28
  "type_vocab_size": 2,
 
23
  "num_hidden_layers": 12,
24
  "pad_token_id": 0,
25
  "position_embedding_type": "absolute",
26
+ "problem_type": "single_label_classification",
27
  "torch_dtype": "float32",
28
  "transformers_version": "4.45.0.dev0",
29
  "type_vocab_size": 2,
eval_results.json CHANGED
@@ -1,11 +1,11 @@
1
  {
2
  "epoch": 3.0,
3
- "eval_accuracy": 0.9109077417759089,
4
- "eval_combined_score": 0.8953925089190357,
5
- "eval_f1": 0.8798772760621624,
6
- "eval_loss": 0.35853680968284607,
7
- "eval_runtime": 173.7873,
8
  "eval_samples": 40430,
9
- "eval_samples_per_second": 232.641,
10
- "eval_steps_per_second": 29.082
11
  }
 
1
  {
2
  "epoch": 3.0,
3
+ "eval_accuracy": 0.910784071234232,
4
+ "eval_combined_score": 0.8945102883261596,
5
+ "eval_f1": 0.8782365054180873,
6
+ "eval_loss": 0.3775930106639862,
7
+ "eval_runtime": 1120.9866,
8
  "eval_samples": 40430,
9
+ "eval_samples_per_second": 36.066,
10
+ "eval_steps_per_second": 4.509
11
  }
train_results.json CHANGED
@@ -1,9 +1,9 @@
1
  {
2
  "epoch": 3.0,
3
  "total_flos": 2.8719571514554368e+17,
4
- "train_loss": 0.20698899069389173,
5
- "train_runtime": 14606.6124,
6
  "train_samples": 363846,
7
- "train_samples_per_second": 74.729,
8
- "train_steps_per_second": 4.671
9
  }
 
1
  {
2
  "epoch": 3.0,
3
  "total_flos": 2.8719571514554368e+17,
4
+ "train_loss": 0.0,
5
+ "train_runtime": 0.002,
6
  "train_samples": 363846,
7
+ "train_samples_per_second": 559413758.499,
8
+ "train_steps_per_second": 34964320.845
9
  }
trainer_state.json CHANGED
@@ -66,10 +66,10 @@
66
  "epoch": 3.0,
67
  "step": 68223,
68
  "total_flos": 2.8719571514554368e+17,
69
- "train_loss": 0.20698899069389173,
70
- "train_runtime": 14606.6124,
71
- "train_samples_per_second": 74.729,
72
- "train_steps_per_second": 4.671
73
  }
74
  ],
75
  "logging_steps": 500,
 
66
  "epoch": 3.0,
67
  "step": 68223,
68
  "total_flos": 2.8719571514554368e+17,
69
+ "train_loss": 0.0,
70
+ "train_runtime": 0.002,
71
+ "train_samples_per_second": 559413758.499,
72
+ "train_steps_per_second": 34964320.845
73
  }
74
  ],
75
  "logging_steps": 500,