Matej Klemen committed
Commit 293de77
1 Parent(s): 49813b1

Add first dataset script

Files changed (2)
  1. README.md +28 -0
  2. coref149.py +135 -0
README.md CHANGED
@@ -1,3 +1,31 @@
 ---
 license: cc-by-nc-sa-4.0
+dataset_info:
+  features:
+  - name: id_doc
+    dtype: string
+  - name: words
+    sequence:
+      sequence: string
+  - name: mentions
+    list:
+    - name: id_mention
+      dtype: string
+    - name: mention_data
+      struct:
+      - name: idx_sent
+        dtype: uint32
+      - name: word_indices
+        sequence: uint32
+      - name: global_word_indices
+        sequence: uint32
+  - name: coref_clusters
+    sequence:
+      sequence: string
+  splits:
+  - name: train
+    num_bytes: 413196
+    num_examples: 149
+  download_size: 463706
+  dataset_size: 413196
 ---
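
The metadata block above describes the schema and a single 149-document train split. As a quick check that the card and the script line up, the dataset can be loaded through the `datasets` library. This is a minimal sketch; the repository id is a placeholder, and a local path to a checkout containing coref149.py should work as well:

from datasets import load_dataset

# Load via the script added in this commit; "user/coref149" is a placeholder
# Hub repository id (assumption), a local checkout path also works.
dataset = load_dataset("user/coref149")
train = dataset["train"]

print(len(train))           # 149 documents, matching num_examples above
example = train[0]
print(example["id_doc"])    # source .tcf file name
print(example["words"][0])  # tokens of the first sentence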
coref149.py ADDED
@@ -0,0 +1,135 @@
+""" Slovene corpus for coreference resolution coref149. """
+
+
+import os
+import xml.etree.ElementTree as ET
+import datasets
+
+
+_CITATION = """\
+@article{coref149,
+    author={Žitnik, Slavko and Bajec, Marko},
+    title={Odkrivanje koreferenčnosti v slovenskem jeziku na označenih besedilih iz coref149},
+    journal={Slovenščina 2.0: empirične, aplikativne in interdisciplinarne raziskave},
+    number={1},
+    volume={6},
+    year={2018},
+    month={Jun.},
+    pages={37–67},
+    doi={10.4312/slo2.0.2018.1.37-67}
+}
+"""
+
+_DESCRIPTION = """\
+Slovene corpus for coreference resolution. Contains manually annotated coreferences.
+"""
+
+_HOMEPAGE = "http://hdl.handle.net/11356/1182"
+
+_LICENSE = "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)"
+
+_URLS = {
+    "coref149": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1182/coref149_v1.0.zip"
+}
+
+
+class Coref149(datasets.GeneratorBasedBuilder):
+    """Slovene corpus for coreference resolution."""
+
+    VERSION = datasets.Version("1.0.0")
+
+    def _info(self):
+        features = datasets.Features(
+            {
+                "id_doc": datasets.Value("string"),
+                "words": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
+                "mentions": [{
+                    "id_mention": datasets.Value("string"),
+                    "mention_data": {
+                        "idx_sent": datasets.Value("uint32"),
+                        "word_indices": datasets.Sequence(datasets.Value("uint32")),
+                        "global_word_indices": datasets.Sequence(datasets.Value("uint32"))
+                    }
+                }],
+                "coref_clusters": datasets.Sequence(datasets.Sequence(datasets.Value("string")))
+            }
+        )
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        urls = _URLS["coref149"]
+        data_dir = dl_manager.download_and_extract(urls)
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "data_dir": data_dir
+                }
+            )
+        ]
+
+    def _generate_examples(self, data_dir):
+        TC_NAMESPACE = "{http://www.dspin.de/data/textcorpus}"
+        all_files = sorted([fname for fname in os.listdir(data_dir) if fname.endswith(".tcf")],
+                           key=lambda _fname: int(_fname.split(".")[-2]))
+
+        for idx_file, curr_fname in enumerate(all_files):
+            curr_doc = ET.parse(os.path.join(data_dir, curr_fname))
+            root = curr_doc.getroot()
+            id_doc = curr_fname.split(os.path.sep)[-1]
+
+            token_tags = root.findall(f".//{TC_NAMESPACE}token")
+            id2tok, id2idx, id2globidx, id2sentidx = {}, {}, {}, {}
+            for idx_global, token in enumerate(token_tags):
+                id_token = token.attrib["ID"]
+                text_token = token.text.strip()
+
+                id2tok[id_token] = text_token
+                id2globidx[id_token] = idx_global
+
+            sent_tags = root.findall(f".//{TC_NAMESPACE}sentence")
+            words = []
+            for idx_sent, sent in enumerate(sent_tags):
+                token_ids = sent.attrib["tokenIDs"].split(" ")
+                for local_position, _id_tok in enumerate(token_ids):
+                    id2sentidx[_id_tok] = idx_sent
+                    id2idx[_id_tok] = local_position
+                words.append([id2tok[_id] for _id in token_ids])
+
+            mentions, clusters = [], []
+            for ent in root.findall(f".//{TC_NAMESPACE}entity"):
+                curr_cluster = []
+                for ref in ent.findall(f"{TC_NAMESPACE}reference"):
+                    id_mention = f"{id_doc}.{ref.attrib['ID']}"
+                    curr_cluster.append(id_mention)
+                    curr_mention = {
+                        "id_mention": id_mention,
+                        "mention_data": {
+                            "idx_sent": None,
+                            "word_indices": [],
+                            "global_word_indices": []
+                        }
+                    }
+
+                    for id_token in ref.attrib['tokenIDs'].split(" "):
+                        curr_mention["mention_data"]["idx_sent"] = id2sentidx[id_token]
+                        curr_mention["mention_data"]["word_indices"].append(id2idx[id_token])
+                        curr_mention["mention_data"]["global_word_indices"].append(id2globidx[id_token])
+
+                    mentions.append(curr_mention)
+
+                clusters.append(curr_cluster)
+
+            yield idx_file, {
+                "id_doc": id_doc,
+                "words": words,
+                "mentions": mentions,
+                "coref_clusters": clusters
+            }