NamCyan committed · Commit 1c524fb · 1 Parent(s): db87eef

upload load script

Files changed (1)
  1. the-vault-class.py +230 -0
the-vault-class.py ADDED
@@ -0,0 +1,230 @@
import os

import pyarrow as pa
import pyarrow.parquet as pq
import datasets


# Meta information
_REPO_NAME = 'Fsoft-AIC/the-vault-class'

_DESCRIPTION = """The Vault is a multilingual code-text dataset with over 40 million pairs covering 10 popular programming languages.
It is the largest corpus containing parallel code-text data. By building upon The Stack, a massive raw code sample collection,
The Vault offers a comprehensive and clean resource for advancing research in code understanding and generation. It provides a
high-quality dataset that includes code-text pairs at multiple levels, such as the class and inline level, in addition to the function level.
The Vault can serve many purposes at multiple levels."""

_HOMEPAGE = "https://huggingface.co/Fsoft-AIC"
_LICENSE = "MIT License"
_CITATION = """
@article{manh2023vault,
  title={The Vault: A Comprehensive Multilingual Dataset for Advancing Code Understanding and Generation},
  author={Manh, Dung Nguyen and Hai, Nam Le and Dau, Anh TV and Nguyen, Anh Minh and Nghiem, Khanh and Guo, Jin and Bui, Nghi DQ},
  journal={arXiv preprint arXiv:2305.06156},
  year={2023}
}
"""
################################################################################################

# Config metadata
_LANG_TO_TEXT = {
    "python": "python",
    "c": "c",
    "c#": "c_sharp",
    "c++": "cpp",
    "go": "go",
    "java": "java",
    "javascript": "javascript",
    "php": "php",
    "ruby": "ruby",
    "rust": "rust",
}

# C and Go have no class-level data, so they are excluded from the selectable configs;
# otherwise building BUILDER_CONFIGS below would trip the assertion in
# TheVaultClassConfig.__init__ at import time.
_LANG_CONFIGS = ["all"] + [lang for lang in _LANG_TO_TEXT if lang not in ("c", "go")]

_TEXT_TO_LANG = {}
for lang in _LANG_TO_TEXT:
    _TEXT_TO_LANG[_LANG_TO_TEXT[lang]] = lang

# Number of parquet shards per language under data/train/.
num_shard_split = {
    "ruby": 3,
    "c_sharp": 17,
    "cpp": 1,
    "java": 60,
    "javascript": 3,
    "php": 13,
    "python": 5,
    "rust": 1,
}
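
# Shard files follow the naming scheme consumed by _split_generators below, e.g.
# Java's 60 shards run from data/train/java-00000-of-00060.parquet
# to data/train/java-00059-of-00060.parquet.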

################################################################################################

class TheVaultClassConfig(datasets.BuilderConfig):
    """BuilderConfig for The Vault dataset."""

    def __init__(self, *args, languages=["all"], **kwargs):
        """BuilderConfig for The Vault dataset.

        Args:
            languages (:obj:`List[str]`): List of languages to load.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(
            *args,
            name="+".join([_LANG_TO_TEXT.get(lang, lang) for lang in languages]),
            **kwargs,
        )

        languages = set([lang.lower() for lang in languages])

        assert "go" not in languages and "c" not in languages, "C and Go do not have class-level data."
        assert all([language in _LANG_CONFIGS for language in languages]), f"languages {languages} contains a language not in {_LANG_CONFIGS}."

        if "all" in languages:
            assert len(languages) == 1, f"Passed 'all' together with other languages. {languages}"
        else:
            languages = [_LANG_TO_TEXT[lang] for lang in languages]  # Convert to text names

        self.languages = list(languages)

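# A config's name is the "+"-joined normalized language list, e.g.
# TheVaultClassConfig(languages=["c++", "python"]).name == "cpp+python".
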
class TheVaultClass(datasets.GeneratorBasedBuilder):
    """The Vault dataset."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = TheVaultClassConfig
    BUILDER_CONFIGS = [TheVaultClassConfig(languages=[lang]) for lang in _LANG_CONFIGS]
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "hexsha": datasets.Value("string"),
                "repo": datasets.Value("string"),
                "path": datasets.Value("string"),
                "license": datasets.Sequence(datasets.Value("string")),
                "language": datasets.Value("string"),
                "identifier": datasets.Value("string"),
                "original_string": datasets.Value("string"),
                "original_docstring": datasets.Value("string"),
                "docstring": datasets.Value("string"),
                "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                "code": datasets.Value("string"),
                "code_tokens": datasets.Sequence(datasets.Value("string")),
                "short_docstring": datasets.Value("string"),
                "short_docstring_tokens": datasets.Sequence(datasets.Value("string")),
                "comment": datasets.Sequence(datasets.Value("string")),
                "parameters": [
                    {
                        "param": datasets.Value("string"),
                        "type": datasets.Value("string"),
                    }
                ],
                "docstring_params": {
                    "returns": [
                        {
                            "docstring": datasets.Value("string"),
                            "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                            "type": datasets.Value("string"),
                        }
                    ],
                    "raises": [
                        {
                            "docstring": datasets.Value("string"),
                            "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                            "type": datasets.Value("string"),
                        }
                    ],
                    "params": [
                        {
                            "identifier": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "docstring": datasets.Value("string"),
                            "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                            "default": datasets.Value("string"),
                            "is_optional": datasets.Value("bool"),
                        }
                    ],
                    "outlier_params": [
                        {
                            "identifier": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "docstring": datasets.Value("string"),
                            "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                            "default": datasets.Value("string"),
                            "is_optional": datasets.Value("bool"),
                        }
                    ],
                    "others": [
                        {
                            "identifier": datasets.Value("string"),
                            "docstring": datasets.Value("string"),
                            "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                        }
                    ],
                },
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
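
    # Nested docstring metadata is reachable directly from each example, e.g.
    # example["docstring_params"]["params"] is a list of dicts with
    # "identifier", "type", "docstring", "docstring_tokens", "default",
    # and "is_optional" keys.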

    def _split_generators(self, dl_manager):
        generators = []
        languages = self.config.languages

        if "all" in languages:
            # Expand to the languages that actually have class-level shards;
            # expanding over _LANG_TO_TEXT would raise a KeyError for c and go.
            languages = list(num_shard_split.keys())

        split_files = []
        for language in languages:
            num_shards = num_shard_split[language]
            data_files = [
                f"data/train/{language}-{_index:05d}-of-{num_shards:05d}.parquet"
                for _index in range(num_shards)
            ]
            files = dl_manager.download(data_files)
            split_files.extend(files)

        generators.append(
            datasets.SplitGenerator(
                name="train",
                gen_kwargs={
                    "files": split_files,
                },
            ),
        )

        return generators

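    # With the default "all" config, `files` spans
    # 3 + 17 + 1 + 60 + 3 + 13 + 5 + 1 = 103 parquet shards in total.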
    def _generate_examples(self, files):
        key = 0
        for file_idx, file in enumerate(files):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                # Read each parquet shard in batches to keep memory bounded.
                for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=10_000)):
                    pa_table = pa.Table.from_batches([record_batch])
                    for row_index in range(pa_table.num_rows):
                        row = pa_table.slice(row_index, 1).to_pydict()

                        yield key, {
                            "hexsha": row['hexsha'][0],
                            "repo": row['repo'][0],
                            "path": row['path'][0],
                            "license": row['license'][0],
                            "language": row['language'][0],
                            "identifier": row['identifier'][0],
                            "original_string": row['original_string'][0],
                            "original_docstring": row['original_docstring'][0],
                            "docstring": row['docstring'][0],
                            "docstring_tokens": row['docstring_tokens'][0],
                            "code": row['code'][0],
                            "code_tokens": row['code_tokens'][0],
                            "short_docstring": row['short_docstring'][0],
                            "short_docstring_tokens": row['short_docstring_tokens'][0],
                            "comment": row['comment'][0],
                            "parameters": row['parameters'][0],
                            "docstring_params": row['docstring_params'][0],
                        }
                        key += 1
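
For reference, a minimal loading sketch. It assumes a datasets release that
still executes dataset scripts; recent versions require passing
trust_remote_code=True explicitly:

    import datasets

    # Load only the Python portion of the class-level data.
    data = datasets.load_dataset(
        "Fsoft-AIC/the-vault-class",
        languages=["python"],
        trust_remote_code=True,
    )
    print(data["train"][0]["identifier"])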