EhsanAghazadeh committed on
Commit 627b641
1 Parent(s): 050ce1d

Add model and its dependencies
README.md ADDED
@@ -0,0 +1,136 @@
+ The uploaded model is from epoch 9, with a Matthews correlation of 0.6677.
+
+ End-of-training summary:
+ - best_metric: 0.667660908939119
+ - best_model_checkpoint: /content/output_dir/checkpoint-2412
+ - epoch: 10.0
+ - global_step: 2680
+ - is_hyper_param_search: false
+ - is_local_process_zero: true
+ - is_world_process_zero: true
+ - max_steps: 2680
+ - num_train_epochs: 10
+ - total_flos: 7189983634007040.0
+ - trial_name: null
+ - trial_params: null
+
+ | epoch | eval_loss | eval_matthews_correlation | eval_runtime | eval_samples_per_second | eval_steps_per_second | step | learning_rate | loss |
+ | ---: | ---: | ---: | ---: | ---: | ---: | ---: | ---: | ---: |
+ | 1 | 0.5115634202957153 | 0.5385290213636863 | 7.985 | 130.62 | 16.406 | 268 | 9.280492497114274e-05 | 0.4622 |
+ | 2 | 0.4201788902282715 | 0.6035894895952164 | 8.0283 | 129.916 | 16.317 | 536 | 8.249326664101577e-05 | 0.2823 |
+ | 3 | 0.580650806427002 | 0.5574138665741355 | 8.1314 | 128.268 | 16.11 | 804 | 7.218160831088881e-05 | 0.1804 |
+ | 4 | 0.4439031779766083 | 0.6557697896854868 | 8.1435 | 128.078 | 16.087 | 1072 | 6.186994998076183e-05 | 0.1357 |
+ | 5 | 0.5736830830574036 | 0.6249925495853809 | 8.0533 | 129.512 | 16.267 | 1340 | 5.155829165063486e-05 | 0.0913 |
+ | 6 | 0.7729296684265137 | 0.6188970025554703 | 8.081 | 129.068 | 16.211 | 1608 | 4.1246633320507885e-05 | 0.065 |
+ | 7 | 0.7351673245429993 | 0.6405767700619004 | 8.1372 | 128.176 | 16.099 | 1876 | 3.093497499038092e-05 | 0.0433 |
+ | 8 | 0.7900031208992004 | 0.6565021466238845 | 8.1095 | 128.615 | 16.154 | 2144 | 2.0623316660253942e-05 | 0.0199 |
+ | 9 | 0.8539554476737976 | 0.667660908939119 | 8.1204 | 128.442 | 16.132 | 2412 | 1.0311658330126971e-05 | 0.0114 |
+ | 10 | 0.9261117577552795 | 0.660301076782038 | 8.0088 | 130.231 | 16.357 | 2680 | 0 | 0.0066 |
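For orientation, a minimal usage sketch, assuming the repository is loaded through transformers. `<this-repo-id>` is a placeholder for the Hub id, which this diff does not state, and the GLUE CoLA label convention (0 = unacceptable, 1 = acceptable) is assumed since config.json defines no id2label map:

```python
# Minimal sketch: run the fine-tuned ELECTRA CoLA classifier.
# "<this-repo-id>" is a placeholder; the 0/1 label meaning follows the
# GLUE CoLA convention, which is an assumption here.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("<this-repo-id>")
model = AutoModelForSequenceClassification.from_pretrained("<this-repo-id>")
model.eval()

inputs = tokenizer("The book was written by John.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 2)
print(logits.argmax(dim=-1).item())
```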
config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "_name_or_path": "google/electra-base-discriminator",
+   "architectures": [
+     "ElectraForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "embedding_size": 768,
+   "finetuning_task": "cola",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "electra",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "summary_activation": "gelu",
+   "summary_last_dropout": 0.1,
+   "summary_type": "first",
+   "summary_use_proj": true,
+   "transformers_version": "4.9.0.dev0",
+   "type_vocab_size": 2,
+   "vocab_size": 30522
+ }
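This config pins the architecture exactly (ELECTRA-base: 12 layers, 12 heads, hidden size 768, with a 2-way classification head for CoLA). As a sketch, it can rebuild the model skeleton without the trained weights:

```python
# Sketch: instantiate the architecture from config.json alone.
# Weights are random here; the fine-tuned ones live in pytorch_model.bin.
from transformers import ElectraConfig, ElectraForSequenceClassification

config = ElectraConfig.from_json_file("config.json")
model = ElectraForSequenceClassification(config)
print(config.num_hidden_layers, config.num_attention_heads, config.hidden_size)
# 12 12 768
```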
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b5d037f30c2e171570f7802cb8ea0d77bbeb069f12b3b14ab00bc78f8378899
+ size 438020461
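The three lines above are a Git LFS pointer, not the weights themselves: `oid` is the SHA-256 of the real file and `size` its byte count. A small sketch of verifying a downloaded pytorch_model.bin against this pointer:

```python
# Sketch: verify a downloaded pytorch_model.bin against the LFS pointer above.
import hashlib
import os

EXPECTED_OID = "6b5d037f30c2e171570f7802cb8ea0d77bbeb069f12b3b14ab00bc78f8378899"
EXPECTED_SIZE = 438020461

h = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert os.path.getsize("pytorch_model.bin") == EXPECTED_SIZE
assert h.hexdigest() == EXPECTED_OID
print("pointer matches")
```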
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d53d121a81c1fe84198a4a3cc82a81750cf0049062a181a80dd3c31f27e3b009
+ size 14503
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "google/electra-base-discriminator", "tokenizer_class": "ElectraTokenizer"}
trainer_state.json ADDED
@@ -0,0 +1,151 @@
+ {
+   "best_metric": 0.667660908939119,
+   "best_model_checkpoint": "/content/output_dir/checkpoint-2412",
+   "epoch": 9.0,
+   "global_step": 2412,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "learning_rate": 9.280492497114274e-05,
+       "loss": 0.4622,
+       "step": 268
+     },
+     {
+       "epoch": 1.0,
+       "eval_loss": 0.5115634202957153,
+       "eval_matthews_correlation": 0.5385290213636863,
+       "eval_runtime": 7.985,
+       "eval_samples_per_second": 130.62,
+       "eval_steps_per_second": 16.406,
+       "step": 268
+     },
+     {
+       "epoch": 2.0,
+       "learning_rate": 8.249326664101577e-05,
+       "loss": 0.2823,
+       "step": 536
+     },
+     {
+       "epoch": 2.0,
+       "eval_loss": 0.4201788902282715,
+       "eval_matthews_correlation": 0.6035894895952164,
+       "eval_runtime": 8.0283,
+       "eval_samples_per_second": 129.916,
+       "eval_steps_per_second": 16.317,
+       "step": 536
+     },
+     {
+       "epoch": 3.0,
+       "learning_rate": 7.218160831088881e-05,
+       "loss": 0.1804,
+       "step": 804
+     },
+     {
+       "epoch": 3.0,
+       "eval_loss": 0.580650806427002,
+       "eval_matthews_correlation": 0.5574138665741355,
+       "eval_runtime": 8.1314,
+       "eval_samples_per_second": 128.268,
+       "eval_steps_per_second": 16.11,
+       "step": 804
+     },
+     {
+       "epoch": 4.0,
+       "learning_rate": 6.186994998076183e-05,
+       "loss": 0.1357,
+       "step": 1072
+     },
+     {
+       "epoch": 4.0,
+       "eval_loss": 0.4439031779766083,
+       "eval_matthews_correlation": 0.6557697896854868,
+       "eval_runtime": 8.1435,
+       "eval_samples_per_second": 128.078,
+       "eval_steps_per_second": 16.087,
+       "step": 1072
+     },
+     {
+       "epoch": 5.0,
+       "learning_rate": 5.155829165063486e-05,
+       "loss": 0.0913,
+       "step": 1340
+     },
+     {
+       "epoch": 5.0,
+       "eval_loss": 0.5736830830574036,
+       "eval_matthews_correlation": 0.6249925495853809,
+       "eval_runtime": 8.0533,
+       "eval_samples_per_second": 129.512,
+       "eval_steps_per_second": 16.267,
+       "step": 1340
+     },
+     {
+       "epoch": 6.0,
+       "learning_rate": 4.1246633320507885e-05,
+       "loss": 0.065,
+       "step": 1608
+     },
+     {
+       "epoch": 6.0,
+       "eval_loss": 0.7729296684265137,
+       "eval_matthews_correlation": 0.6188970025554703,
+       "eval_runtime": 8.081,
+       "eval_samples_per_second": 129.068,
+       "eval_steps_per_second": 16.211,
+       "step": 1608
+     },
+     {
+       "epoch": 7.0,
+       "learning_rate": 3.093497499038092e-05,
+       "loss": 0.0433,
+       "step": 1876
+     },
+     {
+       "epoch": 7.0,
+       "eval_loss": 0.7351673245429993,
+       "eval_matthews_correlation": 0.6405767700619004,
+       "eval_runtime": 8.1372,
+       "eval_samples_per_second": 128.176,
+       "eval_steps_per_second": 16.099,
+       "step": 1876
+     },
+     {
+       "epoch": 8.0,
+       "learning_rate": 2.0623316660253942e-05,
+       "loss": 0.0199,
+       "step": 2144
+     },
+     {
+       "epoch": 8.0,
+       "eval_loss": 0.7900031208992004,
+       "eval_matthews_correlation": 0.6565021466238845,
+       "eval_runtime": 8.1095,
+       "eval_samples_per_second": 128.615,
+       "eval_steps_per_second": 16.154,
+       "step": 2144
+     },
+     {
+       "epoch": 9.0,
+       "learning_rate": 1.0311658330126971e-05,
+       "loss": 0.0114,
+       "step": 2412
+     },
+     {
+       "epoch": 9.0,
+       "eval_loss": 0.8539554476737976,
+       "eval_matthews_correlation": 0.667660908939119,
+       "eval_runtime": 8.1204,
+       "eval_samples_per_second": 128.442,
+       "eval_steps_per_second": 16.132,
+       "step": 2412
+     }
+   ],
+   "max_steps": 2680,
+   "num_train_epochs": 10,
+   "total_flos": 6470985270606336.0,
+   "trial_name": null,
+   "trial_params": null
+ }
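The logged learning rates are consistent with a linear decay to zero over the 2680 training steps, i.e. lr(step) = lr0 * (1 - step/2680) with lr0 ≈ 1.0312e-04 (an inferred value; the diff never states the initial learning rate directly). A sketch that parses log_history to recover the best epoch and check that schedule:

```python
# Sketch: mine trainer_state.json for the best eval epoch and sanity-check
# the apparent linear LR decay lr(step) = LR0 * (1 - step / max_steps).
import json

LR0 = 1.0311658330126971e-04  # inferred initial learning rate (assumption)

with open("trainer_state.json") as f:
    state = json.load(f)

evals = [e for e in state["log_history"] if "eval_matthews_correlation" in e]
best = max(evals, key=lambda e: e["eval_matthews_correlation"])
print(best["epoch"], best["eval_matthews_correlation"])  # 9.0 0.6676...

for entry in state["log_history"]:
    if "learning_rate" in entry:
        expected = LR0 * (1 - entry["step"] / state["max_steps"])
        assert abs(entry["learning_rate"] - expected) < 1e-10
```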
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0488020c92e79d33659cff1c0a772cca0a5f12e7d84470b1f7d6303dcb68dd1a
+ size 2607
vocab.txt ADDED
The diff for this file is too large to render. See raw diff