Tasks: Text Retrieval
Sub-tasks: entity-linking-retrieval
Languages: Chinese
Size: 1M - 10M
ArXiv: 2207.13005
# coding=utf-8
"""Hansel: A Chinese Few-Shot and Zero-Shot Entity Linking Benchmark"""
import json
import os
import datasets
_HANSEL_CITATION = """\
@misc{xu2022hansel,
title = {Hansel: A Chinese Few-Shot and Zero-Shot Entity Linking Benchmark},
author = {Xu, Zhenran and Shan, Zifei and Li, Yuxin and Hu, Baotian and Qin, Bing},
publisher = {arXiv},
year = {2022},
url = {https://arxiv.org/abs/2207.13005}
}
"""
_HANSEL_DESCRIPTION = """\
Hansel is a high-quality, human-annotated Chinese entity linking (EL) dataset for testing how well Chinese EL systems generalize to tail entities and emerging entities.
The test set contains few-shot (FS) and zero-shot (ZS) slices, has 10K examples in total, and uses Wikidata as the corresponding knowledge base.
The training and validation sets are derived from Wikipedia hyperlinks and are useful for large-scale pretraining of Chinese EL systems.
"""
_URLS = {
"train": "hansel-train.jsonl",
"val": "hansel-val.jsonl",
"hansel-fs": "hansel-few-shot-v1.jsonl",
"hansel-zs": "hansel-zero-shot-v1.jsonl",
}
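# Each data file is in JSON Lines format: one mention record per line. A record
# looks roughly like this (hypothetical values; per the features declared below,
# "source" and "domain" appear only in the few-shot / zero-shot test files):
# {"id": "...", "text": "...", "start": 2, "end": 5,
#  "mention": "...", "gold_id": "Q...", "source": "...", "domain": "..."}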
logger = datasets.logging.get_logger(__name__)
class HanselConfig(datasets.BuilderConfig):
"""BuilderConfig for HanselConfig."""
def __init__(self, features, data_url, citation, url, **kwargs):
"""BuilderConfig for Hansel.
Args:
          features: `list[string]`, list of the features that will appear in the
            feature dict.
          data_url: `string`, base url to download the data files from.
          citation: `string`, citation for the dataset.
          url: `string`, url for information about the dataset.
**kwargs: keyword arguments forwarded to super.
"""
super(HanselConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
self.features = features
self.data_url = data_url
self.citation = citation
self.url = url
class Hansel(datasets.GeneratorBasedBuilder):
"""The Hansel benchmark."""
BUILDER_CONFIGS = [
HanselConfig(
name="wiki",
description=_HANSEL_DESCRIPTION,
features=["id", "text", "start", "end", "mention", "gold_id"],
data_url="https://huggingface.co./datasets/HIT-TMG/Hansel/blob/main/",
citation=_HANSEL_CITATION,
url="https://github.com/HITsz-TMG/Hansel",
        ),
HanselConfig(
name="hansel-few-shot",
description=_HANSEL_DESCRIPTION,
features=["id", "text", "start", "end", "mention", "gold_id", "source", "domain"],
data_url="https://huggingface.co./datasets/HIT-TMG/Hansel/blob/main/",
citation=_HANSEL_CITATION,
url="https://github.com/HITsz-TMG/Hansel",
)
HanselConfig(
name="hansel-zero-shot",
description=_HANSEL_DESCRIPTION,
features=["id", "text", "start", "end", "mention", "gold_id", "source", "domain"],
data_url="https://huggingface.co./datasets/HIT-TMG/Hansel/blob/main/",
citation=_HANSEL_CITATION,
url="https://github.com/HITsz-TMG/Hansel",
)
]
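    # Config → available splits:
    #   "wiki"             → train, validation (Wikipedia hyperlinks)
    #   "hansel-few-shot"  → test (FS slice)
    #   "hansel-zero-shot" → test (ZS slice)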
def _info(self):
        features = {feature: datasets.Value("string") for feature in self.config.features}
        # "start" and "end" are integer character offsets of the mention span in "text".
        features["start"] = datasets.Value("int64")
        features["end"] = datasets.Value("int64")
return datasets.DatasetInfo(
description=self.config.description,
features=datasets.Features(features),
homepage=self.config.url,
            citation=self.config.citation,
)
def _split_generators(self, dl_manager):
        # _URLS is module-level, not an attribute of the builder; dl_manager
        # resolves the relative paths against the repository and caches them.
        urls_to_download = _URLS
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
if "hansel-few" in self.config.name:
return [
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"data_file": downloaded_files["hansel-fs"]),
"split": datasets.Split.TEST,
},
),
]
if "hansel-zero" in self.config.name:
return [
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"data_file": downloaded_files["hansel-zs"],
"split": datasets.Split.TEST,
},
),
]
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"data_file": downloaded_files["train"],
"split": datasets.Split.TRAIN,
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"data_file": downloaded_files["val"],
"split": datasets.Split.VALIDATION,
},
),
]
def _generate_examples(self, data_file, split):
logger.info("generating examples from = %s", data_file)
        with open(data_file, encoding="utf-8") as f:
            for line in f:
                example = json.loads(line)
                key = example["id"]
                if "hansel" in self.config.name:
                    # The few-shot and zero-shot test sets carry two extra
                    # fields recording where each mention comes from.
                    yield key, {
                        "id": example["id"],
                        "text": example["text"],
                        "start": example["start"],
                        "end": example["end"],
                        "mention": example["mention"],
                        "gold_id": example["gold_id"],
                        "source": example["source"],
                        "domain": example["domain"],
                    }
                else:
                    yield key, {
                        "id": example["id"],
                        "text": example["text"],
                        "start": example["start"],
                        "end": example["end"],
                        "mention": example["mention"],
                        "gold_id": example["gold_id"],
                    }
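With the loading script in place, the benchmark can be pulled through the standard `datasets` API. A minimal sketch, assuming the script is served from the HIT-TMG/Hansel repository referenced in `data_url` above:

from datasets import load_dataset

# "hansel-zero-shot" selects the ZS test slice; "hansel-few-shot" selects the
# FS slice, and "wiki" yields the Wikipedia-derived train/validation splits.
# Newer versions of `datasets` may additionally require trust_remote_code=True.
zs_test = load_dataset("HIT-TMG/Hansel", "hansel-zero-shot", split="test")

example = zs_test[0]
# "start" and "end" are character offsets of the mention inside "text".
assert example["text"][example["start"]:example["end"]] == example["mention"]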