model update
- README.md +12 -0
- analogy.bidirection.json +1 -1
- analogy.forward.json +1 -1
- analogy.reverse.json +1 -1
- config.json +1 -1
- tokenizer_config.json +1 -1
README.md
CHANGED
@@ -103,6 +103,17 @@ model-index:
     - name: Accuracy
       type: accuracy
       value: 0.5081967213114754
+  - task:
+      name: Analogy Questions (NELL-ONE Analogy)
+      type: multiple-choice-qa
+    dataset:
+      name: NELL-ONE Analogy
+      args: relbert/analogy_questions
+      type: analogy-questions
+    metrics:
+    - name: Accuracy
+      type: accuracy
+      value: 0.61
   - task:
       name: Lexical Relation Classification (BLESS)
       type: classification
@@ -188,6 +199,7 @@ This model achieves the following results on the relation understanding tasks:
     - Accuracy on Google: 0.934
     - Accuracy on ConceptNet Analogy: 0.3674496644295302
     - Accuracy on T-Rex Analogy: 0.5081967213114754
+    - Accuracy on NELL-ONE Analogy: 0.61
 - Lexical Relation Classification ([dataset](https://huggingface.co/datasets/relbert/lexical_relation_classification), [full result](https://huggingface.co/relbert/relbert-roberta-large-nce-e-semeval2012/raw/main/classification.json)):
     - Micro F1 score on BLESS: 0.9213500075335241
     - Micro F1 score on CogALexV: 0.8551643192488263
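The new NELL-ONE task entry above is machine-readable through the card's model-index metadata. A minimal sketch of reading it back out, assuming a recent `huggingface_hub` that exposes `ModelCard` and parsed `eval_results` (the repo id is taken from the links in the README):

```python
# Sketch: read the model-index entry added above back out of the card
# metadata. Assumes huggingface_hub's ModelCard / eval_results API.
from huggingface_hub import ModelCard

card = ModelCard.load("relbert/relbert-roberta-large-nce-e-semeval2012")

# card.data.eval_results holds one EvalResult per (task, dataset, metric)
# triple declared in the model-index block.
for result in card.data.eval_results:
    if result.dataset_name == "NELL-ONE Analogy":
        # Expected: accuracy 0.61, matching the value in the hunk above.
        print(result.task_name, result.metric_type, result.metric_value)
```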
analogy.bidirection.json
CHANGED
@@ -1 +1 @@
-{"sat_full/test": 0.660427807486631, "sat/test": 0.6646884272997032, "u2/test": 0.6885964912280702, "u4/test": 0.6620370370370371, "google/test": 0.954, "bats/test": 0.8104502501389661, "t_rex_relational_similarity/test": 0.6010928961748634, "conceptnet_relational_similarity/test": 0.4312080536912752, "sat/validation": 0.6216216216216216, "u2/validation": 0.6666666666666666, "u4/validation": 0.625, "google/validation": 0.96, "bats/validation": 0.8592964824120602, "semeval2012_relational_similarity/validation": 0.6962025316455697, "t_rex_relational_similarity/validation": 0.26814516129032256, "conceptnet_relational_similarity/validation": 0.36241007194244607}
+{"sat_full/test": 0.660427807486631, "sat/test": 0.6646884272997032, "u2/test": 0.6885964912280702, "u4/test": 0.6620370370370371, "google/test": 0.954, "bats/test": 0.8104502501389661, "t_rex_relational_similarity/test": 0.6010928961748634, "conceptnet_relational_similarity/test": 0.4312080536912752, "sat/validation": 0.6216216216216216, "u2/validation": 0.6666666666666666, "u4/validation": 0.625, "google/validation": 0.96, "bats/validation": 0.8592964824120602, "semeval2012_relational_similarity/validation": 0.6962025316455697, "t_rex_relational_similarity/validation": 0.26814516129032256, "conceptnet_relational_similarity/validation": 0.36241007194244607, "nell_relational_similarity/test": 0.6483333333333333, "nell_relational_similarity/validation": 0.5175}
analogy.forward.json
CHANGED
@@ -1 +1 @@
-{"semeval2012_relational_similarity/validation": 0.7341772151898734, "sat_full/test": 0.6203208556149733, "sat/test": 0.6261127596439169, "u2/test": 0.6403508771929824, "u4/test": 0.6157407407407407, "google/test": 0.934, "bats/test": 0.7787659811006115, "t_rex_relational_similarity/test": 0.5081967213114754, "conceptnet_relational_similarity/test": 0.3674496644295302, "sat/validation": 0.5675675675675675, "u2/validation": 0.625, "u4/validation": 0.5833333333333334, "google/validation": 1.0, "bats/validation": 0.8241206030150754, "t_rex_relational_similarity/validation": 0.2399193548387097, "conceptnet_relational_similarity/validation": 0.3183453237410072}
+{"semeval2012_relational_similarity/validation": 0.7341772151898734, "sat_full/test": 0.6203208556149733, "sat/test": 0.6261127596439169, "u2/test": 0.6403508771929824, "u4/test": 0.6157407407407407, "google/test": 0.934, "bats/test": 0.7787659811006115, "t_rex_relational_similarity/test": 0.5081967213114754, "conceptnet_relational_similarity/test": 0.3674496644295302, "sat/validation": 0.5675675675675675, "u2/validation": 0.625, "u4/validation": 0.5833333333333334, "google/validation": 1.0, "bats/validation": 0.8241206030150754, "t_rex_relational_similarity/validation": 0.2399193548387097, "conceptnet_relational_similarity/validation": 0.3183453237410072, "nell_relational_similarity/test": 0.61, "nell_relational_similarity/validation": 0.46}
analogy.reverse.json
CHANGED
@@ -1 +1 @@
-{"sat_full/test": 0.6550802139037433, "sat/test": 0.6646884272997032, "u2/test": 0.6754385964912281, "u4/test": 0.6527777777777778, "google/test": 0.946, "bats/test": 0.773763201778766, "t_rex_relational_similarity/test": 0.5956284153005464, "conceptnet_relational_similarity/test": 0.4186241610738255, "sat/validation": 0.5675675675675675, "u2/validation": 0.5833333333333334, "u4/validation": 0.5833333333333334, "google/validation": 0.92, "bats/validation": 0.8341708542713567, "semeval2012_relational_similarity/validation": 0.6582278481012658, "t_rex_relational_similarity/validation": 0.2661290322580645, "conceptnet_relational_similarity/validation": 0.32014388489208634}
+{"sat_full/test": 0.6550802139037433, "sat/test": 0.6646884272997032, "u2/test": 0.6754385964912281, "u4/test": 0.6527777777777778, "google/test": 0.946, "bats/test": 0.773763201778766, "t_rex_relational_similarity/test": 0.5956284153005464, "conceptnet_relational_similarity/test": 0.4186241610738255, "sat/validation": 0.5675675675675675, "u2/validation": 0.5833333333333334, "u4/validation": 0.5833333333333334, "google/validation": 0.92, "bats/validation": 0.8341708542713567, "semeval2012_relational_similarity/validation": 0.6582278481012658, "t_rex_relational_similarity/validation": 0.2661290322580645, "conceptnet_relational_similarity/validation": 0.32014388489208634, "nell_relational_similarity/test": 0.6483333333333333, "nell_relational_similarity/validation": 0.5375}
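All three scoring directions gain the same two `nell_relational_similarity` keys. A minimal sketch comparing the new test scores across the files, assuming `huggingface_hub` is installed and the Hub is reachable (file names are exactly those in the hunks above):

```python
# Sketch: download the three result files and print the new NELL test
# scores side by side.
import json
from huggingface_hub import hf_hub_download

repo = "relbert/relbert-roberta-large-nce-e-semeval2012"
for fname in ("analogy.forward.json", "analogy.reverse.json", "analogy.bidirection.json"):
    path = hf_hub_download(repo_id=repo, filename=fname)
    with open(path) as f:
        scores = json.load(f)
    # Expected: 0.61 (forward), 0.6483... (reverse and bidirection).
    print(fname, scores["nell_relational_similarity/test"])
```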
config.json
CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "
+  "_name_or_path": "roberta-large",
   "architectures": [
     "RobertaModel"
   ],
tokenizer_config.json
CHANGED
@@ -6,7 +6,7 @@
   "errors": "replace",
   "mask_token": "<mask>",
   "model_max_length": 512,
-  "name_or_path": "
+  "name_or_path": "roberta-large",
   "pad_token": "<pad>",
   "sep_token": "</s>",
   "special_tokens_map_file": null,
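Both fixes point the `name_or_path` fields at the `roberta-large` base model. A minimal sketch checking that the checkpoint still loads cleanly with `transformers`; the repo id is an assumption taken from the README links:

```python
# Sketch: load the checkpoint and confirm values touched by this commit.
# config.json declares RobertaModel, so AutoModel resolves to it.
from transformers import AutoModel, AutoTokenizer

repo = "relbert/relbert-roberta-large-nce-e-semeval2012"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModel.from_pretrained(repo)

print(type(model).__name__)        # RobertaModel, per config.json
print(tokenizer.model_max_length)  # 512, per tokenizer_config.json
```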