gokulsrinivasagan committed
Commit 69bdd97 · verified · 1 Parent(s): f422ac6

End of training
README.md CHANGED
@@ -1,14 +1,32 @@
 ---
 library_name: transformers
+language:
+- en
 base_model: gokulsrinivasagan/bert_tiny_lda_50_v1_book
 tags:
 - generated_from_trainer
+datasets:
+- glue
 metrics:
 - matthews_correlation
 - accuracy
 model-index:
 - name: bert_tiny_lda_50_v1_book_cola
-  results: []
+  results:
+  - task:
+      name: Text Classification
+      type: text-classification
+    dataset:
+      name: GLUE COLA
+      type: glue
+      args: cola
+    metrics:
+    - name: Matthews Correlation
+      type: matthews_correlation
+      value: 0.1366972617887581
+    - name: Accuracy
+      type: accuracy
+      value: 0.6989453434944153
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -16,11 +34,11 @@ should probably proofread and complete it, then remove this comment. -->
 
 # bert_tiny_lda_50_v1_book_cola
 
-This model is a fine-tuned version of [gokulsrinivasagan/bert_tiny_lda_50_v1_book](https://huggingface.co/gokulsrinivasagan/bert_tiny_lda_50_v1_book) on an unknown dataset.
+This model is a fine-tuned version of [gokulsrinivasagan/bert_tiny_lda_50_v1_book](https://huggingface.co/gokulsrinivasagan/bert_tiny_lda_50_v1_book) on the GLUE COLA dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.7503
-- Matthews Correlation: 0.2271
-- Accuracy: 0.6999
+- Loss: 0.5935
+- Matthews Correlation: 0.1367
+- Accuracy: 0.6989
 
 ## Model description
 
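The updated card now names the GLUE CoLA dataset explicitly and reports the best-checkpoint metrics (loss 0.5935, Matthews correlation 0.1367, accuracy 0.6989) instead of the final-epoch numbers from the previous card. As a minimal usage sketch (not part of this commit): the repo id below is inferred from the model name in the card, and the reported label names may be the generic LABEL_0/LABEL_1 unless id2label was customized.

```python
# Minimal sketch: score a sentence for linguistic acceptability (CoLA is binary).
# The repo id is assumed from the model name in the card; label names depend on
# the checkpoint's id2label mapping and may be LABEL_0 / LABEL_1.
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="gokulsrinivasagan/bert_tiny_lda_50_v1_book_cola",
)
print(classifier("The book was read by the whole class."))
```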
all_results.json ADDED
@@ -0,0 +1,16 @@
+{
+    "epoch": 8.0,
+    "eval_accuracy": 0.6989453434944153,
+    "eval_loss": 0.5934821963310242,
+    "eval_matthews_correlation": 0.1366972617887581,
+    "eval_runtime": 0.3508,
+    "eval_samples": 1043,
+    "eval_samples_per_second": 2973.39,
+    "eval_steps_per_second": 14.254,
+    "total_flos": 1793902531485696.0,
+    "train_loss": 0.4772484197336085,
+    "train_runtime": 50.017,
+    "train_samples": 8551,
+    "train_samples_per_second": 8548.097,
+    "train_steps_per_second": 33.988
+}
eval_results.json ADDED
@@ -0,0 +1,10 @@
+{
+    "epoch": 8.0,
+    "eval_accuracy": 0.6989453434944153,
+    "eval_loss": 0.5934821963310242,
+    "eval_matthews_correlation": 0.1366972617887581,
+    "eval_runtime": 0.3508,
+    "eval_samples": 1043,
+    "eval_samples_per_second": 2973.39,
+    "eval_steps_per_second": 14.254
+}
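eval_results.json repeats the evaluation block from all_results.json: the 1,043-example CoLA validation split scored with the best checkpoint. For reference, a Matthews-correlation/accuracy pair like this can be recomputed from predictions and gold labels; the sketch below uses scikit-learn as a stand-in for the GLUE metric script, with toy arrays rather than the model's actual outputs.

```python
# Illustrative only: reproducing metrics of the same kind as eval_accuracy and
# eval_matthews_correlation from predictions and labels. The arrays are hypothetical.
from sklearn.metrics import accuracy_score, matthews_corrcoef

y_true = [1, 1, 0, 1, 0, 1, 0, 0]   # hypothetical gold acceptability labels
y_pred = [1, 1, 1, 1, 0, 0, 0, 1]   # hypothetical model predictions

print("accuracy:", accuracy_score(y_true, y_pred))
print("matthews_correlation:", matthews_corrcoef(y_true, y_pred))
```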
logs/events.out.tfevents.1733835503.ki-g0008.684565.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:12c287887791e29a97765d919b2755d600cc8f2bc8b573744db2f95fef397a5f
+size 475
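This TensorBoard event file is stored through Git LFS, so the three lines above are only the LFS pointer (spec version, object hash, size); the actual log is fetched when the repo is cloned with LFS enabled. Once downloaded, its scalars could be read programmatically, for example with TensorBoard's event reader as sketched below; the directory path and tag name are assumptions, following the Trainer's usual "train/..." and "eval/..." naming.

```python
# Sketch: list and read scalar series from the downloaded tfevents file.
# "logs" and "eval/loss" are assumed, not taken from this commit.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("logs")   # directory containing the events.out.tfevents.* file
ea.Reload()

print(ea.Tags()["scalars"])     # available scalar tags
for event in ea.Scalars("eval/loss"):
    print(event.step, event.value)
```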
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 8.0,
+    "total_flos": 1793902531485696.0,
+    "train_loss": 0.4772484197336085,
+    "train_runtime": 50.017,
+    "train_samples": 8551,
+    "train_samples_per_second": 8548.097,
+    "train_steps_per_second": 33.988
+}
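Note that train_samples_per_second and train_steps_per_second appear to be derived from the scheduled run length (50 epochs, 1,700 steps) rather than the 8 epochs / 272 steps actually executed before early stopping; the quick arithmetic check below reproduces both figures under that assumption.

```python
# Consistency check (assumption: throughput uses the scheduled totals from
# trainer_state.json, i.e. 50 epochs and max_steps = 1700, not the 272 steps run).
train_samples = 8551
scheduled_epochs = 50
scheduled_steps = 1700
train_runtime = 50.017

print(train_samples * scheduled_epochs / train_runtime)  # ~8548.1 -> train_samples_per_second
print(scheduled_steps / train_runtime)                   # ~33.99  -> train_steps_per_second
```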
trainer_state.json ADDED
@@ -0,0 +1,187 @@
+{
+  "best_metric": 0.5934821963310242,
+  "best_model_checkpoint": "bert_tiny_lda_50_v1_book_cola/checkpoint-102",
+  "epoch": 8.0,
+  "eval_steps": 500,
+  "global_step": 272,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "grad_norm": 0.7861921191215515,
+      "learning_rate": 4.9e-05,
+      "loss": 0.6133,
+      "step": 34
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.6912751793861389,
+      "eval_loss": 0.6133261919021606,
+      "eval_matthews_correlation": 0.0,
+      "eval_runtime": 0.3311,
+      "eval_samples_per_second": 3150.274,
+      "eval_steps_per_second": 15.102,
+      "step": 34
+    },
+    {
+      "epoch": 2.0,
+      "grad_norm": 1.130760908126831,
+      "learning_rate": 4.8e-05,
+      "loss": 0.5995,
+      "step": 68
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.6893576383590698,
+      "eval_loss": 0.6048817038536072,
+      "eval_matthews_correlation": -0.02929206145132745,
+      "eval_runtime": 0.3328,
+      "eval_samples_per_second": 3134.435,
+      "eval_steps_per_second": 15.026,
+      "step": 68
+    },
+    {
+      "epoch": 3.0,
+      "grad_norm": 1.8091745376586914,
+      "learning_rate": 4.7e-05,
+      "loss": 0.57,
+      "step": 102
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.6989453434944153,
+      "eval_loss": 0.5934821963310242,
+      "eval_matthews_correlation": 0.1366972617887581,
+      "eval_runtime": 0.332,
+      "eval_samples_per_second": 3141.232,
+      "eval_steps_per_second": 15.059,
+      "step": 102
+    },
+    {
+      "epoch": 4.0,
+      "grad_norm": 2.9426748752593994,
+      "learning_rate": 4.600000000000001e-05,
+      "loss": 0.5209,
+      "step": 136
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.6845637559890747,
+      "eval_loss": 0.6036127805709839,
+      "eval_matthews_correlation": 0.1838806266083814,
+      "eval_runtime": 0.3338,
+      "eval_samples_per_second": 3124.801,
+      "eval_steps_per_second": 14.98,
+      "step": 136
+    },
+    {
+      "epoch": 5.0,
+      "grad_norm": 3.014277935028076,
+      "learning_rate": 4.5e-05,
+      "loss": 0.466,
+      "step": 170
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.6826462149620056,
+      "eval_loss": 0.6264468431472778,
+      "eval_matthews_correlation": 0.20738864502723908,
+      "eval_runtime": 0.3336,
+      "eval_samples_per_second": 3126.624,
+      "eval_steps_per_second": 14.989,
+      "step": 170
+    },
+    {
+      "epoch": 6.0,
+      "grad_norm": 3.3172547817230225,
+      "learning_rate": 4.4000000000000006e-05,
+      "loss": 0.3922,
+      "step": 204
+    },
+    {
+      "epoch": 6.0,
+      "eval_accuracy": 0.6979866027832031,
+      "eval_loss": 0.712081789970398,
+      "eval_matthews_correlation": 0.1825111909284102,
+      "eval_runtime": 0.355,
+      "eval_samples_per_second": 2938.433,
+      "eval_steps_per_second": 14.086,
+      "step": 204
+    },
+    {
+      "epoch": 7.0,
+      "grad_norm": 5.064127445220947,
+      "learning_rate": 4.3e-05,
+      "loss": 0.3492,
+      "step": 238
+    },
+    {
+      "epoch": 7.0,
+      "eval_accuracy": 0.6989453434944153,
+      "eval_loss": 0.7019482851028442,
+      "eval_matthews_correlation": 0.22042544777190046,
+      "eval_runtime": 0.3311,
+      "eval_samples_per_second": 3149.922,
+      "eval_steps_per_second": 15.1,
+      "step": 238
+    },
+    {
+      "epoch": 8.0,
+      "grad_norm": 7.477691650390625,
+      "learning_rate": 4.2e-05,
+      "loss": 0.307,
+      "step": 272
+    },
+    {
+      "epoch": 8.0,
+      "eval_accuracy": 0.6999041438102722,
+      "eval_loss": 0.7502853274345398,
+      "eval_matthews_correlation": 0.2271320033748531,
+      "eval_runtime": 0.3593,
+      "eval_samples_per_second": 2902.519,
+      "eval_steps_per_second": 13.914,
+      "step": 272
+    },
+    {
+      "epoch": 8.0,
+      "step": 272,
+      "total_flos": 1793902531485696.0,
+      "train_loss": 0.4772484197336085,
+      "train_runtime": 50.017,
+      "train_samples_per_second": 8548.097,
+      "train_steps_per_second": 33.988
+    }
+  ],
+  "logging_steps": 1,
+  "max_steps": 1700,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 50,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "EarlyStoppingCallback": {
+      "args": {
+        "early_stopping_patience": 5,
+        "early_stopping_threshold": 0.0
+      },
+      "attributes": {
+        "early_stopping_patience_counter": 5
+      }
+    },
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 1793902531485696.0,
+  "train_batch_size": 256,
+  "trial_name": null,
+  "trial_params": null
+}
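The trainer state records a run scheduled for 50 epochs (1,700 steps) that early stopping ended after epoch 8, since eval loss never improved on the epoch-3 value of 0.5935; checkpoint-102 was therefore kept as best. The sketch below is an assumed reconstruction of a Trainer setup consistent with these logged values (batch size 256, 5e-5 initial learning rate implied by the 4.9e-05 → 4.2e-05 linear decay, per-epoch evaluation, patience 5). It is not the training script from this repository, and argument names such as eval_strategy vary slightly across transformers versions.

```python
# Assumed reconstruction, consistent with trainer_state.json; not the actual script.
import numpy as np
from datasets import load_dataset
from sklearn.metrics import accuracy_score, matthews_corrcoef
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    EarlyStoppingCallback,
    Trainer,
    TrainingArguments,
)

base = "gokulsrinivasagan/bert_tiny_lda_50_v1_book"  # base model named in the card
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForSequenceClassification.from_pretrained(base, num_labels=2)

raw = load_dataset("glue", "cola")
encoded = raw.map(lambda b: tokenizer(b["sentence"], truncation=True), batched=True)

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    return {
        "accuracy": accuracy_score(labels, preds),
        "matthews_correlation": matthews_corrcoef(labels, preds),
    }

args = TrainingArguments(
    output_dir="bert_tiny_lda_50_v1_book_cola",
    learning_rate=5e-5,                # implied by the logged linear decay
    per_device_train_batch_size=256,   # matches "train_batch_size": 256
    num_train_epochs=50,               # matches "num_train_epochs": 50
    eval_strategy="epoch",
    save_strategy="epoch",
    load_best_model_at_end=True,
    metric_for_best_model="eval_loss",
    greater_is_better=False,
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=encoded["train"],
    eval_dataset=encoded["validation"],
    tokenizer=tokenizer,
    compute_metrics=compute_metrics,
    callbacks=[EarlyStoppingCallback(early_stopping_patience=5)],
)
trainer.train()
```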