w05230505 committed
Commit: 87bbc81
Parent: 7ad593e

End of training

Files changed (6)
  1. README.md +22 -4
  2. all_results.json +11 -11
  3. config.json +1 -0
  4. eval_results.json +8 -8
  5. train_results.json +4 -4
  6. trainer_state.json +4 -4
README.md CHANGED
@@ -1,15 +1,33 @@
  ---
  library_name: transformers
+ language:
+ - en
  license: apache-2.0
  base_model: bert-base-uncased
  tags:
  - generated_from_trainer
+ datasets:
+ - glue
  metrics:
  - accuracy
  - f1
  model-index:
  - name: bert-base-uncased-finetuned-mrpc
-   results: []
+   results:
+   - task:
+       name: Text Classification
+       type: text-classification
+     dataset:
+       name: GLUE MRPC
+       type: glue
+       args: mrpc
+     metrics:
+     - name: Accuracy
+       type: accuracy
+       value: 0.8529411764705882
+     - name: F1
+       type: f1
+       value: 0.8958333333333334
  ---

  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -17,12 +35,12 @@ should probably proofread and complete it, then remove this comment. -->

  # bert-base-uncased-finetuned-mrpc

- This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset.
+ This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the GLUE MRPC dataset.
  It achieves the following results on the evaluation set:
+ - Loss: 0.4322
  - Accuracy: 0.8529
- - Combined Score: 0.8744
  - F1: 0.8958
- - Loss: 0.4322
+ - Combined Score: 0.8744

  ## Model description

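The updated card describes a sentence-pair (paraphrase) classifier fine-tuned on GLUE MRPC. As a minimal usage sketch — assuming the model is published as `w05230505/bert-base-uncased-finetuned-mrpc`, a repo id inferred from the committer and model name rather than stated in this commit — it could be loaded with the `transformers` text-classification pipeline:

```python
# Minimal sketch; the repo id below is an assumption inferred from the committer
# and model name, not confirmed by this commit.
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="w05230505/bert-base-uncased-finetuned-mrpc",
)

# MRPC is a sentence-pair task, so the two sentences are passed as a pair.
prediction = classifier(
    {"text": "The company said quarterly profits rose.",
     "text_pair": "Quarterly profits increased, the company said."}
)
print(prediction)  # e.g. [{'label': 'LABEL_1', 'score': ...}] unless id2label was remapped
```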
all_results.json CHANGED
@@ -1,18 +1,18 @@
  {
  "epoch": 3.0,
- "eval_accuracy": 0.6838235294117647,
- "eval_combined_score": 0.7480253018237863,
- "eval_f1": 0.8122270742358079,
- "eval_loss": 0.6342515349388123,
+ "eval_accuracy": 0.8529411764705882,
+ "eval_combined_score": 0.8743872549019608,
+ "eval_f1": 0.8958333333333334,
+ "eval_loss": 0.43215397000312805,
  "eval_model_preparation_time": 0.0025,
- "eval_runtime": 2.8487,
+ "eval_runtime": 2.0353,
  "eval_samples": 408,
- "eval_samples_per_second": 143.222,
- "eval_steps_per_second": 17.903,
+ "eval_samples_per_second": 200.46,
+ "eval_steps_per_second": 25.058,
  "total_flos": 2895274053181440.0,
- "train_loss": 0.3542040064714957,
- "train_runtime": 140.1555,
+ "train_loss": 0.0,
+ "train_runtime": 0.0023,
  "train_samples": 3668,
- "train_samples_per_second": 78.513,
- "train_steps_per_second": 4.923
+ "train_samples_per_second": 4799222.337,
+ "train_steps_per_second": 300932.698
  }
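For reference, the new `eval_combined_score` is consistent with the simple mean of accuracy and F1, which is how the GLUE example scripts typically derive it; a quick check:

```python
# Sanity check: the combined score is the mean of the two MRPC metrics above.
accuracy = 0.8529411764705882
f1 = 0.8958333333333334

combined_score = (accuracy + f1) / 2
print(combined_score)  # 0.8743872549019608 — matches eval_combined_score
```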
config.json CHANGED
@@ -23,6 +23,7 @@
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
  "torch_dtype": "float32",
  "transformers_version": "4.44.2",
  "type_vocab_size": 2,
eval_results.json CHANGED
@@ -1,11 +1,11 @@
  {
- "eval_accuracy": 0.6838235294117647,
- "eval_combined_score": 0.7480253018237863,
- "eval_f1": 0.8122270742358079,
- "eval_loss": 0.6342515349388123,
- "eval_model_preparation_time": 0.0025,
- "eval_runtime": 2.8487,
+ "epoch": 3.0,
+ "eval_accuracy": 0.8529411764705882,
+ "eval_combined_score": 0.8743872549019608,
+ "eval_f1": 0.8958333333333334,
+ "eval_loss": 0.43215397000312805,
+ "eval_runtime": 2.0353,
  "eval_samples": 408,
- "eval_samples_per_second": 143.222,
- "eval_steps_per_second": 17.903
+ "eval_samples_per_second": 200.46,
+ "eval_steps_per_second": 25.058
  }
train_results.json CHANGED
@@ -1,9 +1,9 @@
  {
  "epoch": 3.0,
  "total_flos": 2895274053181440.0,
- "train_loss": 0.3542040064714957,
- "train_runtime": 140.1555,
+ "train_loss": 0.0,
+ "train_runtime": 0.0023,
  "train_samples": 3668,
- "train_samples_per_second": 78.513,
- "train_steps_per_second": 4.923
+ "train_samples_per_second": 4799222.337,
+ "train_steps_per_second": 300932.698
  }
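The throughput figures here are derived quantities: samples per second is roughly `train_samples × epochs / train_runtime` and steps per second is `steps / train_runtime`, which is why the near-zero runtime recorded for this final "End of training" save produces such large values. A rough check against the numbers above:

```python
# Rough consistency check; the displayed runtime is rounded to 4 decimals,
# so the results only approximately reproduce the reported rates.
train_samples = 3668
epochs = 3.0
steps = 690
train_runtime = 0.0023  # seconds, as rounded in train_results.json

print(train_samples * epochs / train_runtime)  # ~4.78e6 vs reported 4799222.337
print(steps / train_runtime)                   # ~3.0e5 vs reported 300932.698
```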
trainer_state.json CHANGED
@@ -66,10 +66,10 @@
  "epoch": 3.0,
  "step": 690,
  "total_flos": 2895274053181440.0,
- "train_loss": 0.3542040064714957,
- "train_runtime": 140.1555,
- "train_samples_per_second": 78.513,
- "train_steps_per_second": 4.923
+ "train_loss": 0.0,
+ "train_runtime": 0.0023,
+ "train_samples_per_second": 4799222.337,
+ "train_steps_per_second": 300932.698
  }
  ],
  "logging_steps": 500,