jonghwi committed
Commit: aa32e7b
Parent: 60f2742

Upload ko, ar, ja, hi, es

.gitattributes CHANGED
@@ -53,3 +53,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ arabic-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ hindi-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ japanese-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ korean-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ spanish-train.jsonl filter=lfs diff=lfs merge=lfs -text
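These new attribute lines route the five training files through Git LFS, so the commit itself stores only small pointer stubs (version, sha256 oid, byte size) rather than the multi-gigabyte JSONL payloads, as the file diffs below show. As a rough illustration (the helper names here are invented, not part of this commit), a local copy of a training file can be checked against its pointer like this:

```python
import hashlib
from pathlib import Path

def read_lfs_pointer(pointer_path: str) -> dict:
    """Parse the version / oid / size fields of a Git LFS pointer file."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def matches_pointer(data_path: str, pointer_path: str) -> bool:
    """Check a local file's sha256 and byte size against an LFS pointer."""
    pointer = read_lfs_pointer(pointer_path)
    sha256 = hashlib.sha256()
    with open(data_path, "rb") as f:
        # Hash in 1 MiB chunks so multi-gigabyte files never sit in memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha256.update(chunk)
    return (
        pointer["oid"] == f"sha256:{sha256.hexdigest()}"
        and int(pointer["size"]) == Path(data_path).stat().st_size
    )
```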
arabic-train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61a6cafcca2c0afa14cc1bc42f3bdd960cd717559677d6fa56ea9671e5c0860f
+ size 7023014491
hindi-train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af77a528b0cfd859c9a7442e8aa59b13ecdd8f09d3a62236942f7418bb2d72e1
+ size 11253720472
japanese-train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3172122c1df8c18b5bb242e8d2550a7fb6bfb24c731f970ce1d4c0bad5ff5f10
+ size 5954401301
korean-train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6edaddcc1a460b67d69688e51c8f5a7b27630a79c0f7508c478d10f83e1cae3
+ size 5383439029
spanish-train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f021aa3d725a35dc8831accce8600a7b6baae6aafb44eff80776b93113af1758
+ size 5424785574
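Each of the diffs above is only the LFS pointer; the actual payloads (roughly 5–11 GB per language, per the size fields) live in LFS storage. Based on the features declared by the loading script below, each line of these JSONL files is expected to hold one training record pairing a query with its positive and negative passages. A sketch of such a record, with purely made-up placeholder values:

```python
import json

# Purely illustrative record in the tevatron training format; all ids and
# text below are invented placeholders, not values from the actual files.
example_record = {
    "query_id": "q-000001",
    "query": "example query text",
    "positive_passages": [
        {"docid": "d-000101", "text": "a passage that answers the query"},
    ],
    "negative_passages": [
        {"docid": "d-000202", "text": "a passage that does not answer it"},
    ],
}

# The files are JSON Lines: one such record serialized per line.
line = json.dumps(example_record, ensure_ascii=False)
assert json.loads(line)["query_id"] == "q-000001"
```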
tevatron-mmarco-passage.py ADDED
@@ -0,0 +1,103 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """mMARCO Passage dataset."""
+
+ import json
+
+ import datasets
+
+ _CITATION = """
+ """
+
+ _DESCRIPTION = "dataset load script for mMARCO Passage following tevatron training data format"
+
+ languages = [
+     "arabic",
+     "hindi",
+     "korean",
+     "japanese",
+     "spanish",
+ ]
+ _DATASET_URLS = {
+     lang: {
+         'train': f"https://huggingface.co/datasets/jonghwi/tevatron-mmarco-passage/resolve/main/{lang}-train.jsonl",
+     } for lang in languages
+ }
+
+ class MMarcoPassage(datasets.GeneratorBasedBuilder):
+     BUILDER_CONFIGS = [datasets.BuilderConfig(
+         version=datasets.Version("0.0.1"),
+         name=lang,
+         description=f"mMARCO passage train datasets for {lang} following tevatron training data format"
+     ) for lang in languages
+     ]
+
+     def _info(self):
+         features = datasets.Features({
+             'query_id': datasets.Value('string'),
+             'query': datasets.Value('string'),
+             'positive_passages': [
+                 {'docid': datasets.Value('string'), 'text': datasets.Value('string')}
+             ],
+             'negative_passages': [
+                 {'docid': datasets.Value('string'), 'text': datasets.Value('string')}
+             ],
+         })
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,  # the features defined above are shared by all language configurations
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="",
+             # License for the dataset if available
+             license="",
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         lang = self.config.name
+         downloaded_files = dl_manager.download_and_extract(_DATASET_URLS[lang])
+         '''
+         if self.config.data_files:
+             downloaded_files = self.config.data_files
+         else:
+             downloaded_files = dl_manager.download_and_extract(_DATASET_URLS)
+         '''
+         splits = [
+             datasets.SplitGenerator(
+                 name=split,
+                 gen_kwargs={
+                     "files": [downloaded_files[split]] if isinstance(downloaded_files[split], str) else downloaded_files[split],
+                 },
+             ) for split in downloaded_files
+         ]
+         return splits
+
+     def _generate_examples(self, files):
+         """Yields examples."""
+         for filepath in files:
+             with open(filepath, encoding="utf-8") as f:
+                 for line in f:
+                     data = json.loads(line)
+                     if data.get('negative_passages') is None:
+                         data['negative_passages'] = []
+                     if data.get('positive_passages') is None:
+                         data['positive_passages'] = []
+                     yield data['query_id'], data
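With the script in place, each language should be loadable as its own configuration through the standard datasets API; the configuration names come from the languages list above. A minimal usage sketch (recent versions of datasets may additionally require trust_remote_code=True for script-based datasets):

```python
from datasets import load_dataset

# "korean" is one of the five configs declared in the loading script above;
# arabic / hindi / japanese / spanish work the same way.
dataset = load_dataset("jonghwi/tevatron-mmarco-passage", "korean", split="train")

example = dataset[0]
print(example["query_id"], example["query"])
print(len(example["positive_passages"]), len(example["negative_passages"]))
```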