Ronald Cardenas Acosta
committed on
Commit • 031583f
Parent(s): 5e8924f

new language pairs zh-* and *-zh

Browse files
- .gitattributes +2 -0
- README.md +1 -1
- test/zh.jsonl +2 -2
- train/zh.jsonl +3 -0
- valid/zh.jsonl +3 -0
- xwikis.py +6 -37
.gitattributes
CHANGED
@@ -93,3 +93,5 @@ valid/cs-zh.jsonl filter=lfs diff=lfs merge=lfs -text
 valid/de-zh.jsonl filter=lfs diff=lfs merge=lfs -text
 valid/en-zh.jsonl filter=lfs diff=lfs merge=lfs -text
 valid/fr-zh.jsonl filter=lfs diff=lfs merge=lfs -text
+train/zh.jsonl filter=lfs diff=lfs merge=lfs -text
+valid/zh.jsonl filter=lfs diff=lfs merge=lfs -text
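(Note, not part of the commit: the two added lines register the new monolingual Chinese files with Git LFS, mirroring the existing per-pair entries above them; attribute lines of this form are typically what a `git lfs track <path>` command writes into `.gitattributes`.)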
README.md
CHANGED
@@ -120,7 +120,7 @@ yes
 <!-- quick -->
 <!-- info: What languages/dialects are covered in the dataset? -->
 <!-- scope: telescope -->
-`German`, `English`, `French`, `Czech`
+`German`, `English`, `French`, `Czech`, `Chinese`
 
 #### License
 
test/zh.jsonl
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:ef69fcfc6ed855373055a59303ea892f0d46f115688aa883e8a3d492d31898f2
+size 37214675
train/zh.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:344beffc3daf2abd9535956fd687ab43ea893bfa95191605f0f98ce009d57de4
+size 1065902922
valid/zh.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2a784e4d7c1deabe877db71a3978b8194f80e84d616d1e1a544745752a2fa60
+size 96364048
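(Note, not part of the commit: the three `zh.jsonl` diffs above are Git LFS pointer stubs, not the data itself. Each pointer records the spec version, a `sha256` oid, and the byte size of the real file held by the LFS server. A minimal sketch of reading such a pointer; the helper name is illustrative, not part of the repo:)

# Sketch: parse a Git LFS pointer file like the zh.jsonl stubs above.
def parse_lfs_pointer(path):
    # Each line is "<key> <value>", e.g. "oid sha256:ef69fc..." or "size 37214675".
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

# e.g. parse_lfs_pointer("train/zh.jsonl")
# -> {"version": "https://git-lfs.github.com/spec/v1",
#     "oid": "sha256:344beffc...", "size": "1065902922"}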
xwikis.py
CHANGED
@@ -18,7 +18,7 @@
 import csv
 import json
 import os
-
+import itertools
 import datasets
 
 
@@ -51,47 +51,16 @@ VERSION = datasets.Version("0.1.0")
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace dataset library don't host the datasets but only point to the original files
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+LANGS = ["en","fr","cs","de","zh"]
 
 LPAIRS = [
-    f"{x}-{y}" for x
+    f"{x}-{y}" for x,y in itertools.product(LANGS,LANGS) if x!=y
 ]
 
 _URLs = {
-    "train": [ f"./train/{xy}.jsonl" for xy in LPAIRS],
-
-
-    # "./train/en-de.jsonl",
-    # "./train/de-en.jsonl",
-    # "./train/en-cs.jsonl",
-    # "./train/cs-en.jsonl",
-    # "./train/fr-de.jsonl",
-    # "./train/de-fr.jsonl",
-    # "./train/fr-cs.jsonl",
-    # "./train/cs-fr.jsonl",
-    # "./train/de-cs.jsonl",
-    # "./train/cs-de.jsonl",
-    # ],
-    "validation": [ f"./valid/{xy}.jsonl" for xy in LPAIRS],
-    # "validation": [
-    # "./valid/en-fr.jsonl",
-    # "./valid/fr-en.jsonl",
-    # "./valid/en-de.jsonl",
-    # "./valid/de-en.jsonl",
-    # "./valid/en-cs.jsonl",
-    # "./valid/cs-en.jsonl",
-    # "./valid/fr-de.jsonl",
-    # "./valid/de-fr.jsonl",
-    # "./valid/fr-cs.jsonl",
-    # "./valid/cs-fr.jsonl",
-    # "./valid/de-cs.jsonl",
-    # "./valid/cs-de.jsonl",
-    # ],
-    "test": [ f"./test/{xy}.jsonl" for xy in LPAIRS if "-en" in xy],
-    # "test": [
-    # "./test/fr-en.jsonl",
-    # "./test/de-en.jsonl",
-    # "./test/cs-en.jsonl",
-    # ],
+    "train": [ f"./train/{xy}.jsonl" for xy in LPAIRS] + [ f"./train/{x}.jsonl" for x in LANGS],
+    "validation": [ f"./valid/{xy}.jsonl" for xy in LPAIRS] + [ f"./valid/{x}.jsonl" for x in LANGS],
+    "test": [ f"./test/{xy}.jsonl" for xy in LPAIRS if "-en" in xy] + [ f"./test/{x}.jsonl" for x in LANGS],
 }
 
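(Note, not part of the commit: the `xwikis.py` change replaces the hard-coded pair lists with generated ones and appends the new monolingual files. A self-contained sketch of what the updated code evaluates to, with values copied from the diff; the asserts are illustrative:)

import itertools

LANGS = ["en", "fr", "cs", "de", "zh"]

# All ordered pairs of distinct languages: 5 * 4 = 20 entries,
# e.g. "en-fr", "fr-en", ..., "de-zh", "zh-de".
LPAIRS = [f"{x}-{y}" for x, y in itertools.product(LANGS, LANGS) if x != y]

# Cross-lingual pair files plus the monolingual per-language files added
# in this commit; the test split keeps only pairs into English ("-en").
_URLs = {
    "train": [f"./train/{xy}.jsonl" for xy in LPAIRS] + [f"./train/{x}.jsonl" for x in LANGS],
    "validation": [f"./valid/{xy}.jsonl" for xy in LPAIRS] + [f"./valid/{x}.jsonl" for x in LANGS],
    "test": [f"./test/{xy}.jsonl" for xy in LPAIRS if "-en" in xy] + [f"./test/{x}.jsonl" for x in LANGS],
}

assert len(LPAIRS) == 20
assert len(_URLs["test"]) == 4 + 5  # fr-en, de-en, cs-en, zh-en + 5 monolingual files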