ArneBinder committed
Commit c73ae7d · 1 Parent(s): 67a4e57

pie-datasets 0.4.0

Files changed (2)
  1. brat.py +3 -282
  2. requirements.txt +1 -1
brat.py CHANGED
@@ -1,284 +1,5 @@
- import logging
- from collections import defaultdict
- from typing import Any, Dict, List, Optional, Tuple, Union

- import datasets
- from pytorch_ie.annotations import BinaryRelation, LabeledMultiSpan, LabeledSpan
- from pytorch_ie.core import Annotation

- from pie_datasets import GeneratorBasedBuilder
- from pie_datasets.document.types import (
-     Attribute,
-     BratDocument,
-     BratDocumentWithMergedSpans,
- )
-
- logger = logging.getLogger(__name__)
-
-
- def dl2ld(dict_of_lists: Dict[str, List[Any]]) -> List[Dict[str, Any]]:
-     return [dict(zip(dict_of_lists, t)) for t in zip(*dict_of_lists.values())]
-
-
- def ld2dl(
-     list_fo_dicts: List[Dict[str, Any]], keys: Optional[List[str]] = None
- ) -> Dict[str, List[Any]]:
-     keys = keys or list(list_fo_dicts[0])
-     return {k: [dic[k] for dic in list_fo_dicts] for k in keys}
-
-
- def example_to_document(
-     example: Dict[str, Any], merge_fragmented_spans: bool = False
- ) -> Union[BratDocument, BratDocumentWithMergedSpans]:
-     if merge_fragmented_spans:
-         doc = BratDocumentWithMergedSpans(text=example["context"], id=example["file_name"])
-     else:
-         doc = BratDocument(text=example["context"], id=example["file_name"])
-
-     spans: Dict[str, LabeledSpan] = dict()
-     span_locations: List[Tuple[Tuple[int, int]]] = []
-     span_texts: List[str] = []
-     for span_dict in dl2ld(example["spans"]):
-         starts: List[int] = span_dict["locations"]["start"]
-         ends: List[int] = span_dict["locations"]["end"]
-         slices = tuple(zip(starts, ends))
-         span_locations.append(slices)
-         span_texts.append(span_dict["text"])
-         # sanity check
-         span_text_parts = [doc.text[start:end] for start, end in slices]
-         joined_span_texts_stripped = " ".join(span_text_parts).strip()
-         span_text_stripped = span_dict["text"].strip()
-         if joined_span_texts_stripped != span_text_stripped:
-             logger.warning(
-                 f"joined span parts do not match stripped span text field content. "
-                 f'joined_span_texts_stripped: "{joined_span_texts_stripped}" != stripped "text": "{span_text_stripped}"'
-             )
-         if merge_fragmented_spans:
-             if len(starts) > 1:
-                 # check if the text in between the fragments holds only space
-                 merged_content_texts = [
-                     doc.text[start:end] for start, end in zip(ends[:-1], starts[1:])
-                 ]
-                 merged_content_texts_not_empty = [
-                     text.strip() for text in merged_content_texts if text.strip() != ""
-                 ]
-                 if len(merged_content_texts_not_empty) > 0:
-                     logger.warning(
-                         f"document '{doc.id}' contains a non-contiguous span with text content in between "
-                         f"(will be merged into a single span): "
-                         f"newly covered text parts: {merged_content_texts_not_empty}, "
-                         f"merged span text: '{doc.text[starts[0]:ends[-1]]}', "
-                         f"annotation: {span_dict}"
-                     )
-             # just take everything
-             start = min(starts)
-             end = max(ends)
-             span = LabeledSpan(start=start, end=end, label=span_dict["type"])
-         else:
-             span = LabeledMultiSpan(slices=slices, label=span_dict["type"])
-         spans[span_dict["id"]] = span
-
-     doc.spans.extend(spans.values())
-     doc.metadata["span_ids"] = list(spans.keys())
-     doc.metadata["span_locations"] = span_locations
-     doc.metadata["span_texts"] = span_texts
-
-     relations: Dict[str, BinaryRelation] = dict()
-     for rel_dict in dl2ld(example["relations"]):
-         arguments = dict(zip(rel_dict["arguments"]["type"], rel_dict["arguments"]["target"]))
-         assert set(arguments) == {"Arg1", "Arg2"}
-         head = spans[arguments["Arg1"]]
-         tail = spans[arguments["Arg2"]]
-         rel = BinaryRelation(head=head, tail=tail, label=rel_dict["type"])
-         relations[rel_dict["id"]] = rel
-
-     doc.relations.extend(relations.values())
-     doc.metadata["relation_ids"] = list(relations.keys())
-
-     equivalence_relations = dl2ld(example["equivalence_relations"])
-     if len(equivalence_relations) > 0:
-         raise NotImplementedError("converting equivalence_relations is not yet implemented")
-
-     events = dl2ld(example["events"])
-     if len(events) > 0:
-         raise NotImplementedError("converting events is not yet implemented")
-
-     attribute_annotations: Dict[str, Dict[str, Attribute]] = defaultdict(dict)
-     attribute_ids = []
-     for attribute_dict in dl2ld(example["attributions"]):
-         target_id = attribute_dict["target"]
-         if target_id in spans:
-             target_layer_name = "spans"
-             annotation = spans[target_id]
-         elif target_id in relations:
-             target_layer_name = "relations"
-             annotation = relations[target_id]
-         else:
-             raise Exception("only span and relation attributes are supported yet")
-         attribute = Attribute(
-             annotation=annotation,
-             label=attribute_dict["type"],
-             value=attribute_dict["value"],
-         )
-         attribute_annotations[target_layer_name][attribute_dict["id"]] = attribute
-         attribute_ids.append((target_layer_name, attribute_dict["id"]))
-
-     doc.span_attributes.extend(attribute_annotations["spans"].values())
-     doc.relation_attributes.extend(attribute_annotations["relations"].values())
-     doc.metadata["attribute_ids"] = attribute_ids
-
-     normalizations = dl2ld(example["normalizations"])
-     if len(normalizations) > 0:
-         raise NotImplementedError("converting normalizations is not yet implemented")
-
-     notes = dl2ld(example["notes"])
-     if len(notes) > 0:
-         raise NotImplementedError("converting notes is not yet implemented")
-
-     return doc
-
-
- def document_to_example(
-     document: Union[BratDocument, BratDocumentWithMergedSpans]
- ) -> Dict[str, Any]:
-     example = {
-         "context": document.text,
-         "file_name": document.id,
-     }
-     span_dicts: Dict[Union[LabeledSpan, LabeledMultiSpan], Dict[str, Any]] = dict()
-     assert len(document.metadata["span_locations"]) == len(document.spans)
-     assert len(document.metadata["span_texts"]) == len(document.spans)
-     assert len(document.metadata["span_ids"]) == len(document.spans)
-     for i, span in enumerate(document.spans):
-         locations = tuple((start, end) for start, end in document.metadata["span_locations"][i])
-         if isinstance(span, LabeledSpan):
-             assert locations[0][0] == span.start
-             assert locations[-1][1] == span.end
-         elif isinstance(span, LabeledMultiSpan):
-             assert span.slices == locations
-         else:
-             raise TypeError(f"span has unknown type [{type(span)}]: {span}")
-
-         starts, ends = zip(*locations)
-         span_dict = {
-             "id": document.metadata["span_ids"][i],
-             "locations": {
-                 "start": list(starts),
-                 "end": list(ends),
-             },
-             "text": document.metadata["span_texts"][i],
-             "type": span.label,
-         }
-         if span in span_dicts:
-             prev_ann_dict = span_dicts[span]
-             ann_dict = span_dict
-             logger.warning(
-                 f"document {document.id}: annotation exists twice: {prev_ann_dict['id']} and {ann_dict['id']} "
-                 f"are identical"
-             )
-         span_dicts[span] = span_dict
-     example["spans"] = ld2dl(list(span_dicts.values()), keys=["id", "type", "locations", "text"])
-
-     relation_dicts: Dict[BinaryRelation, Dict[str, Any]] = dict()
-     assert len(document.metadata["relation_ids"]) == len(document.relations)
-     for i, rel in enumerate(document.relations):
-         arg1_id = span_dicts[rel.head]["id"]
-         arg2_id = span_dicts[rel.tail]["id"]
-         relation_dict = {
-             "id": document.metadata["relation_ids"][i],
-             "type": rel.label,
-             "arguments": {
-                 "type": ["Arg1", "Arg2"],
-                 "target": [arg1_id, arg2_id],
-             },
-         }
-         if rel in relation_dicts:
-             prev_ann_dict = relation_dicts[rel]
-             ann_dict = relation_dict
-             logger.warning(
-                 f"document {document.id}: annotation exists twice: {prev_ann_dict['id']} and {ann_dict['id']} "
-                 f"are identical"
-             )
-         relation_dicts[rel] = relation_dict
-
-     example["relations"] = ld2dl(list(relation_dicts.values()), keys=["id", "type", "arguments"])
-
-     example["equivalence_relations"] = ld2dl([], keys=["type", "targets"])
-     example["events"] = ld2dl([], keys=["id", "type", "trigger", "arguments"])
-
-     annotation_dicts = {
-         "spans": span_dicts,
-         "relations": relation_dicts,
-     }
-     all_attribute_annotations = {
-         "spans": document.span_attributes,
-         "relations": document.relation_attributes,
-     }
-     attribute_dicts: Dict[Annotation, Dict[str, Any]] = dict()
-     attribute_ids_per_target = defaultdict(list)
-     for target_layer, attribute_id in document.metadata["attribute_ids"]:
-         attribute_ids_per_target[target_layer].append(attribute_id)
-
-     for target_layer, attribute_ids in attribute_ids_per_target.items():
-         attribute_annotations = all_attribute_annotations[target_layer]
-         assert len(attribute_ids) == len(attribute_annotations)
-         for i, attribute_annotation in enumerate(attribute_annotations):
-             target_id = annotation_dicts[target_layer][attribute_annotation.annotation]["id"]
-             attribute_dict = {
-                 "id": attribute_ids_per_target[target_layer][i],
-                 "type": attribute_annotation.label,
-                 "target": target_id,
-                 "value": attribute_annotation.value,
-             }
-             if attribute_annotation in attribute_dicts:
-                 prev_ann_dict = attribute_dicts[attribute_annotation]
-                 ann_dict = attribute_annotation
-                 logger.warning(
-                     f"document {document.id}: annotation exists twice: {prev_ann_dict['id']} and {ann_dict['id']} "
-                     f"are identical"
-                 )
-             attribute_dicts[attribute_annotation] = attribute_dict
-
-     example["attributions"] = ld2dl(
-         list(attribute_dicts.values()), keys=["id", "type", "target", "value"]
-     )
-     example["normalizations"] = ld2dl(
-         [], keys=["id", "type", "target", "resource_id", "entity_id"]
-     )
-     example["notes"] = ld2dl([], keys=["id", "type", "target", "note"])
-
-     return example
-
-
- class BratConfig(datasets.BuilderConfig):
-     """BuilderConfig for BratDatasetLoader."""
-
-     def __init__(self, merge_fragmented_spans: bool = False, **kwargs):
-         """BuilderConfig for DocRED.
-
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super().__init__(**kwargs)
-         self.merge_fragmented_spans = merge_fragmented_spans
-
-
- class BratDatasetLoader(GeneratorBasedBuilder):
-     # this requires https://github.com/ChristophAlt/pytorch-ie/pull/288
-     DOCUMENT_TYPES = {
-         "default": BratDocument,
-         "merge_fragmented_spans": BratDocumentWithMergedSpans,
-     }
-
-     DEFAULT_CONFIG_NAME = "default"
-     BUILDER_CONFIGS = [
-         BratConfig(name="default"),
-         BratConfig(name="merge_fragmented_spans", merge_fragmented_spans=True),
-     ]
-
-     BASE_DATASET_PATH = "DFKI-SLT/brat"
-
-     def _generate_document(self, example, **kwargs):
-         return example_to_document(
-             example, merge_fragmented_spans=self.config.merge_fragmented_spans
-         )
 
+ from pie_datasets.builders import BratBuilder


+ class Brat(BratBuilder):
+     pass
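
With pie-datasets 0.4.0 the BRAT conversion logic removed above (example_to_document / document_to_example and the builder classes) lives upstream in pie_datasets.builders.BratBuilder, so the dataset script only needs to subclass it. A minimal usage sketch, assuming pie-datasets 0.4.x exposes a top-level load_dataset wrapper and that the builder keeps the "default" and "merge_fragmented_spans" config names from the removed loader; the repository id and the "train" split are placeholders, not taken from this commit:

    from pie_datasets import load_dataset  # assumption: available in pie-datasets 0.4.x

    # "USER/REPO_ID" is a placeholder for the dataset repository that ships this brat.py script.
    dataset = load_dataset("USER/REPO_ID", name="merge_fragmented_spans")
    doc = dataset["train"][0]  # a BratDocumentWithMergedSpans, assuming a "train" split
    print(doc.spans)      # labeled span annotations
    print(doc.relations)  # binary relation annotations
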
requirements.txt CHANGED
@@ -1 +1 @@
- pie-datasets>=0.3.1
+ pie-datasets>=0.4.0,<0.5.0