mathiascreutz committed
Commit 391d4a9
1 Parent(s): 22ad057
Adding training sets
Changed files: opusparcus.py (+23, -3)
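As context for the diff below: the new training sets are read from bzip2-compressed JSON-lines files, one file per language, and the generator stops reading as soon as a row's quality score falls below the configured threshold, which suggests the rows are sorted by descending quality. A rough sketch of what a single training row might contain, with field names taken from the diff and values left as placeholders (nothing here is confirmed by the commit beyond the field names):

    # Hypothetical training row; field names from the diff, values illustrative only.
    {
        "lang": "en",
        "sent1": "...",
        "sent2": "...",
        "gem_id": "...",
        "quality": 97,  # integer score; reading stops at the first row below the threshold
    }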
opusparcus.py
CHANGED
@@ -19,6 +19,7 @@ import csv
 import json
 import os
 import datasets
+import bz2
 
 # Add BibTeX citation
 
@@ -105,7 +106,7 @@ class Opusparcus(datasets.GeneratorBasedBuilder):
                     "sent2": datasets.Value("string"),
                     "annot_score": datasets.Value("float"),
                     "gem_id": datasets.Value("string"),
-
+                    "quality": datasets.Value("uint8")
                 }
             )
 
@@ -190,16 +191,35 @@ class Opusparcus(datasets.GeneratorBasedBuilder):
         """ Yields examples as (key, example) tuples. """
         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is here for legacy reason (tfds) and is not important in itself.
-        if split == datasets.Split.
+        if split == datasets.Split.TRAIN:
+            with bz2.open(filepath, "rt", encoding="utf-8") as f:
+                # We know that this file only contains the desired language,
+                # because for the training sets the languages are in separate
+                # files, and only the desired language has been downloaded
+                for id_, row in enumerate(f):
+                    data = json.loads(row)
+                    if data["quality"] < self.quality:
+                        # The rest of this file contains too low quality data
+                        break
+                    yield id_, {
+                        "lang": data["lang"],
+                        "sent1": data["sent1"],
+                        "sent2": data["sent2"],
+                        "annot_score": None,
+                        "gem_id": data["gem_id"],
+                        "quality": data["quality"],
+                    }
+        else:
             with open(filepath, encoding="utf-8") as f:
                 for id_, row in enumerate(f):
                     data = json.loads(row)
                     if data["lang"] == lang:
                         yield id_, {
-                            "lang": data["lang"]
+                            "lang": data["lang"],
                             "sent1": data["sent1"],
                             "sent2": data["sent2"],
                             "annot_score": data["annot_score"],
                             "gem_id": data["gem_id"],
+                            "quality": 100,
                         }
 
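For completeness, a minimal sketch of how the updated loader might be exercised once the training sets are in place. The dataset path "GEM/opusparcus" and the lang/quality keyword arguments are assumptions inferred from the diff (the generator compares against self.quality and a lang value); this commit does not confirm the exact loading interface.

    # Sketch only: assumes the builder config accepts `lang` and `quality`
    # keyword arguments and that the script is published as "GEM/opusparcus".
    from datasets import load_dataset

    # quality=95 is an arbitrary example threshold; per this commit the TRAIN
    # split stops yielding rows once data["quality"] < quality.
    dataset = load_dataset("GEM/opusparcus", lang="en", quality=95)

    for example in dataset["train"]:
        # Training rows carry a real quality score and no annot_score,
        # while validation/test rows get quality == 100 and a float annot_score.
        print(example["gem_id"], example["quality"])
        break

The fixed quality of 100 on the validation and test side appears to keep the new feature column defined across all splits without changing how those files are read.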