w05230505 committed on
Commit
787f739
1 Parent(s): 38dca0e

End of training

Browse files
Files changed (6) hide show
  1. README.md +23 -5
  2. all_results.json +10 -10
  3. config.json +1 -0
  4. eval_results.json +7 -7
  5. train_results.json +3 -3
  6. trainer_state.json +3 -3
README.md CHANGED
@@ -1,15 +1,33 @@
1
  ---
2
  library_name: transformers
 
 
3
  license: apache-2.0
4
  base_model: bert-base-uncased
5
  tags:
6
  - generated_from_trainer
 
 
7
  metrics:
8
  - accuracy
9
  - f1
10
  model-index:
11
  - name: bert-base-cased-finetuned-qqp
12
- results: []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
  ---
14
 
15
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -17,12 +35,12 @@ should probably proofread and complete it, then remove this comment. -->
17
 
18
  # bert-base-cased-finetuned-qqp
19
 
20
- This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset.
21
  It achieves the following results on the evaluation set:
 
22
  - Accuracy: 0.9109
23
- - Combined Score: 0.8957
24
- - F1: 0.8805
25
- - Loss: 0.3568
26
 
27
  ## Model description
28
 
 
1
  ---
2
  library_name: transformers
3
+ language:
4
+ - en
5
  license: apache-2.0
6
  base_model: bert-base-uncased
7
  tags:
8
  - generated_from_trainer
9
+ datasets:
10
+ - glue
11
  metrics:
12
  - accuracy
13
  - f1
14
  model-index:
15
  - name: bert-base-cased-finetuned-qqp
16
+ results:
17
+ - task:
18
+ name: Text Classification
19
+ type: text-classification
20
+ dataset:
21
+ name: GLUE QQP
22
+ type: glue
23
+ args: qqp
24
+ metrics:
25
+ - name: Accuracy
26
+ type: accuracy
27
+ value: 0.9108830076675736
28
+ - name: F1
29
+ type: f1
30
+ value: 0.8798719701263629
31
  ---
32
 
33
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 
35
 
36
  # bert-base-cased-finetuned-qqp
37
 
38
+ This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the GLUE QQP dataset.
39
  It achieves the following results on the evaluation set:
40
+ - Loss: 0.3636
41
  - Accuracy: 0.9109
42
+ - F1: 0.8799
43
+ - Combined Score: 0.8954
 
44
 
45
  ## Model description
46
 
all_results.json CHANGED
@@ -1,17 +1,17 @@
1
  {
2
  "epoch": 3.0,
3
- "eval_accuracy": 0.9102646549591887,
4
- "eval_combined_score": 0.8956452152607972,
5
- "eval_f1": 0.8810257755624057,
6
- "eval_loss": 0.34155702590942383,
7
- "eval_runtime": 1896.9354,
8
  "eval_samples": 40430,
9
- "eval_samples_per_second": 21.313,
10
- "eval_steps_per_second": 2.664,
11
  "total_flos": 2.8719571514554368e+17,
12
  "train_loss": 0.0,
13
- "train_runtime": 0.0116,
14
  "train_samples": 363846,
15
- "train_samples_per_second": 93935783.158,
16
- "train_steps_per_second": 5871147.806
17
  }
 
1
  {
2
  "epoch": 3.0,
3
+ "eval_accuracy": 0.9108830076675736,
4
+ "eval_combined_score": 0.8953774888969682,
5
+ "eval_f1": 0.8798719701263629,
6
+ "eval_loss": 0.363625168800354,
7
+ "eval_runtime": 6006.0944,
8
  "eval_samples": 40430,
9
+ "eval_samples_per_second": 6.731,
10
+ "eval_steps_per_second": 0.841,
11
  "total_flos": 2.8719571514554368e+17,
12
  "train_loss": 0.0,
13
+ "train_runtime": 0.0017,
14
  "train_samples": 363846,
15
+ "train_samples_per_second": 658929504.829,
16
+ "train_steps_per_second": 41184225.934
17
  }
config.json CHANGED
@@ -23,6 +23,7 @@
23
  "num_hidden_layers": 12,
24
  "pad_token_id": 0,
25
  "position_embedding_type": "absolute",
 
26
  "torch_dtype": "float32",
27
  "transformers_version": "4.45.0.dev0",
28
  "type_vocab_size": 2,
 
23
  "num_hidden_layers": 12,
24
  "pad_token_id": 0,
25
  "position_embedding_type": "absolute",
26
+ "problem_type": "single_label_classification",
27
  "torch_dtype": "float32",
28
  "transformers_version": "4.45.0.dev0",
29
  "type_vocab_size": 2,
eval_results.json CHANGED
@@ -1,11 +1,11 @@
1
  {
2
  "epoch": 3.0,
3
- "eval_accuracy": 0.9102646549591887,
4
- "eval_combined_score": 0.8956452152607972,
5
- "eval_f1": 0.8810257755624057,
6
- "eval_loss": 0.34155702590942383,
7
- "eval_runtime": 1896.9354,
8
  "eval_samples": 40430,
9
- "eval_samples_per_second": 21.313,
10
- "eval_steps_per_second": 2.664
11
  }
 
1
  {
2
  "epoch": 3.0,
3
+ "eval_accuracy": 0.9108830076675736,
4
+ "eval_combined_score": 0.8953774888969682,
5
+ "eval_f1": 0.8798719701263629,
6
+ "eval_loss": 0.363625168800354,
7
+ "eval_runtime": 6006.0944,
8
  "eval_samples": 40430,
9
+ "eval_samples_per_second": 6.731,
10
+ "eval_steps_per_second": 0.841
11
  }
train_results.json CHANGED
@@ -2,8 +2,8 @@
2
  "epoch": 3.0,
3
  "total_flos": 2.8719571514554368e+17,
4
  "train_loss": 0.0,
5
- "train_runtime": 0.0116,
6
  "train_samples": 363846,
7
- "train_samples_per_second": 93935783.158,
8
- "train_steps_per_second": 5871147.806
9
  }
 
2
  "epoch": 3.0,
3
  "total_flos": 2.8719571514554368e+17,
4
  "train_loss": 0.0,
5
+ "train_runtime": 0.0017,
6
  "train_samples": 363846,
7
+ "train_samples_per_second": 658929504.829,
8
+ "train_steps_per_second": 41184225.934
9
  }
trainer_state.json CHANGED
@@ -67,9 +67,9 @@
67
  "step": 68223,
68
  "total_flos": 2.8719571514554368e+17,
69
  "train_loss": 0.0,
70
- "train_runtime": 0.0116,
71
- "train_samples_per_second": 93935783.158,
72
- "train_steps_per_second": 5871147.806
73
  }
74
  ],
75
  "logging_steps": 500,
 
67
  "step": 68223,
68
  "total_flos": 2.8719571514554368e+17,
69
  "train_loss": 0.0,
70
+ "train_runtime": 0.0017,
71
+ "train_samples_per_second": 658929504.829,
72
+ "train_steps_per_second": 41184225.934
73
  }
74
  ],
75
  "logging_steps": 500,