Madjakul committed
Commit
442094b
1 Parent(s): bc19630

Upload 2 files

Files changed (2)
  1. README.md +11 -10
  2. halvest.py +213 -0
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-pretty_name: HALvest-Raw
+pretty_name: HALvest
 
 configs:
 - config_name: ar
@@ -206,7 +206,7 @@ source_datasets:
 
 
 <div align="center">
-<h1> HALvest-Raw </h1>
+<h1> HALvest </h1>
 <h3> Open Scientific Papers Harvested from HAL (Unfiltered) </h3>
 </div>
 
@@ -228,7 +228,7 @@ You can download the dataset using Hugging Face datasets:
 ```py
 from datasets import load_dataset
 
-ds = load_dataset("Madjakul/HALvest-R", "en")
+ds = load_dataset("almanach/HALvest", "en")
 ```
 
 
@@ -337,13 +337,14 @@ The corpus is extracted from the [HAL's open archive](https://hal.science/) whic
 ## Citation
 
 ```bib
-@software{almanach_halvest_2024,
-  author = {Kulumba, Francis and Antoun, Wissam and Vimont, Guillaume and Romary, Laurent},
-  title = {HALvest: Open Scientific Papers Harvested from HAL.},
-  month = April,
-  year = 2024,
-  company = Almanach,
-  url = {https://github.com/Madjakul/HALvesting}
+@misc{kulumba2024harvestingtextualstructureddata,
+  title={Harvesting Textual and Structured Data from the HAL Publication Repository},
+  author={Francis Kulumba and Wissam Antoun and Guillaume Vimont and Laurent Romary},
+  year={2024},
+  eprint={2407.20595},
+  archivePrefix={arXiv},
+  primaryClass={cs.DL},
+  url={https://arxiv.org/abs/2407.20595},
 }
 ```
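
For reference, the renamed dataset can also be read without downloading every shard up front. This is a minimal sketch using the standard `datasets` streaming API; the `"fr"` config is only an illustration, and recent `datasets` releases additionally require `trust_remote_code=True` for script-based datasets such as this one:

```py
from datasets import load_dataset

# Stream shards lazily instead of materializing the full config on disk.
ds = load_dataset(
    "almanach/HALvest",
    "fr",
    streaming=True,
    trust_remote_code=True,  # needed by recent datasets versions for loading scripts
)
print(next(iter(ds["train"]))["halid"])
```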
halvest.py ADDED
@@ -0,0 +1,213 @@
+# halvest.py
+
+import collections
+import gzip
+import json
+import os
+
+import datasets
+
+logger = datasets.logging.get_logger(__name__)
+
+_DESCRIPTION = "HALvest Raw"
+_URL = "https://huggingface.co/datasets/almanach/HALvest"
+_LICENSE = """
+The license terms for HALvest strictly follow those of HAL.
+Please refer to the license below when using this dataset.
+- HAL license: https://doc.archives-ouvertes.fr/en/legal-aspects/
+The corpus is extracted from HAL's open archive, which distributes scientific \
+publications following open access principles. The corpus is made up of both \
+Creative Commons licensed and copyrighted documents (distribution authorized on \
+HAL by the publisher). This must be considered prior to using this dataset for any \
+purpose other than training deep learning models, data mining, etc. We do not own \
+any of the text from which this data has been extracted.
+"""
+_CITATION = """
+@misc{kulumba2024harvestingtextualstructureddata,
+  title={Harvesting Textual and Structured Data from the HAL Publication Repository},
+  author={Francis Kulumba and Wissam Antoun and Guillaume Vimont and Laurent Romary},
+  year={2024},
+  eprint={2407.20595},
+  archivePrefix={arXiv},
+  primaryClass={cs.DL},
+  url={https://arxiv.org/abs/2407.20595},
+}
+"""
+_BASE_DATA_PATH = "{language}/"
+_BASE_CHECKSUM_FILENAME = "checksum.sha256"
+
+
+def _languages():
+    """Returns a sorted dictionary mapping ISO 639 language codes to language names."""
+    langs = {
+        "Albanian": "sq",
+        "Arabic": "ar",
+        "Armenian": "hy",
+        "Azerbaijani": "az",
+        "Basque": "eu",
+        "Bosnian": "bs",
+        "Breton": "br",
+        "Bulgarian": "bg",
+        "Catalan": "ca",
+        "Chinese": "zh",
+        "Corsican": "co",
+        "Croatian": "hr",
+        "Czech": "cs",
+        "Danish": "da",
+        "English": "en",
+        "Esperanto": "eo",
+        "Estonian": "et",
+        "Filipino": "tl",
+        "Finnish": "fi",
+        "French": "fr",
+        "Galician": "gl",
+        "German": "de",
+        "Greek": "el",
+        "Guarani": "gn",
+        "Hebrew": "he",
+        "Hindi": "hi",
+        "Hungarian": "hu",
+        "Indonesian": "id",
+        "Interlingue": "ie",
+        "Italian": "it",
+        "Japanese": "ja",
+        "Kazakh": "kk",
+        "Korean": "ko",
+        "Lithuanian": "lt",
+        "Macedonian": "mk",
+        "Marathi": "mr",
+        "Norwegian": "no",
+        "Occitan": "oc",
+        "Persian": "fa",
+        "Polish": "pl",
+        "Portuguese": "pt",
+        "Romanian": "ro",
+        "Russian": "ru",
+        "Serbian": "sr",
+        "Slovak": "sk",
+        "Slovenian": "sl",
+        "Spanish": "es",
+        "Swahili": "sw",
+        "Swedish": "sv",
+        "Tamil": "ta",
+        "Tetum": "tet",
+        "Thai": "th",
+        "Tibetan": "bo",
+        "Turkish": "tr",
+        "Turkmen": "tk",
+        "Ukrainian": "uk",
+        "Vietnamese": "vi",
+    }
+    # Invert to code -> name, then sort by code.
+    langs = {v: k for k, v in langs.items()}
+    return collections.OrderedDict(sorted(langs.items()))
+
+
+class HALvest_RConfig(datasets.BuilderConfig):
+    """HALvest builder config.
+
+    Parameters
+    ----------
+    language: str
+        ISO 639 language code.
+
+    Attributes
+    ----------
+    base_data_path: str
+        Path prefix of the language's shards, f"{language}/".
+    """
+
+    def __init__(self, language: str, **kwargs):
+        if language not in _languages():
+            raise ValueError(f"Invalid language: {language}")
+
+        description = f"{_languages()[language]} HALvest dataset from February 2024."
+        super(HALvest_RConfig, self).__init__(
+            name=language, description=description, **kwargs
+        )
+        self.language = language
+        self.base_data_path = _BASE_DATA_PATH.format(language=language)
+
+
+class HALvest_R(datasets.GeneratorBasedBuilder):
+    """HALvest: Open Scientific Papers Harvested from HAL."""
+
+    # One config per supported language code.
+    BUILDER_CONFIGS = [
+        HALvest_RConfig(language=language, version=datasets.Version("0.1.0"))
+        for language in _languages()
+    ]
+    BUILDER_CONFIG_CLASS = HALvest_RConfig
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "halid": datasets.Value("string"),
+                    "lang": datasets.Value("string"),
+                    "domain": datasets.Sequence(datasets.Value("string")),
+                    "timestamp": datasets.Value("string"),
+                    "year": datasets.Value("string"),
+                    "url": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                    "token_count": datasets.Value("int32"),
+                    "rps_doc_frac_all_caps_words": datasets.Value("float64"),
+                    "rps_doc_frac_lines_end_with_ellipsis": datasets.Value("float64"),
+                    "rps_doc_frac_no_alph_words": datasets.Value("float64"),
+                    "rps_doc_lorem_ipsum": datasets.Value("float64"),
+                    "rps_doc_mean_word_length": datasets.Value("float64"),
+                    "rps_doc_stop_word_fraction": datasets.Value("float64"),
+                    "rps_doc_symbol_to_word_ratio": datasets.Value("float64"),
+                    "rps_doc_frac_unique_words": datasets.Value("float64"),
+                    "rps_doc_unigram_entropy": datasets.Value("float64"),
+                    "rps_doc_word_count": datasets.Value("int64"),
+                    # Spelling ("punctution") kept as-is to match the key in the data files.
+                    "doc_frac_lines_ending_with_terminal_punctution_mark": datasets.Value("float64"),
+                    "rps_lines_frac_start_with_bulletpoint": datasets.Value("float64"),
+                    "rps_doc_num_sentences": datasets.Value("int64"),
+                    "rps_frac_chars_in_dupe_5grams": datasets.Value("float64"),
+                    "rps_frac_chars_in_dupe_6grams": datasets.Value("float64"),
+                    "rps_frac_chars_in_dupe_7grams": datasets.Value("float64"),
+                    "rps_frac_chars_in_dupe_8grams": datasets.Value("float64"),
+                    "rps_frac_chars_in_dupe_9grams": datasets.Value("float64"),
+                    "rps_frac_chars_in_dupe_10grams": datasets.Value("float64"),
+                    "kenlm_pp": datasets.Value("float64"),
+                }
+            ),
+            supervised_keys=None,
+            homepage=_URL,
+            citation=_CITATION,
+            license=_LICENSE,
+        )
+
+    def _split_generators(self, dl_manager):
+        # The per-language checksum file lists one data shard per line.
+        checksum_path = os.path.join(
+            self.config.base_data_path, _BASE_CHECKSUM_FILENAME
+        )
+        checksum_file = dl_manager.download(checksum_path)
+
+        with open(checksum_file, encoding="utf-8") as f:
+            data_filenames = [
+                line.split("\t")[1].rstrip("\n") for line in f if line.strip()
+            ]
+        data_urls = [
+            os.path.join(self.config.base_data_path, data_filename)
+            for data_filename in data_filenames
+        ]
+
+        downloaded_files = dl_manager.download(
+            [url for url in data_urls if url.endswith(".gz")]
+        )
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files}
+            )
+        ]
+
206
+ id_ = 0
207
+ for filepath in filepaths:
208
+ logger.info("Generating examples from = %s", filepath)
209
+ with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
210
+ for line in f:
211
+ js_line = json.loads(line)
212
+ yield id_, js_line
213
+ id_ += 1
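
`_split_generators` above assumes each line of `checksum.sha256` has the form `<digest>\t<shard filename>`. A minimal sketch of that parsing outside the builder, with hypothetical file contents:

```py
import os

# Hypothetical checksum.sha256 contents, mirroring the "\t" split in _split_generators.
example = "3f2a0c\ten_part_01.gz\n9b7c1d\ten_part_02.gz\n"

filenames = [
    line.split("\t")[1].rstrip("\n")
    for line in example.splitlines(keepends=True)
    if line.strip()
]
print([os.path.join("en/", name) for name in filenames if name.endswith(".gz")])
# ['en/en_part_01.gz', 'en/en_part_02.gz']
```

And a sketch of reading one downloaded shard the same way `_generate_examples` does, assuming a hypothetical local path:

```py
import gzip
import json

# Each shard is gzip-compressed JSON Lines: one document dict per line.
with gzip.open(open("en_part_01.gz", "rb"), "rt", encoding="utf-8") as f:
    for i, line in enumerate(f):
        doc = json.loads(line)
        print(i, doc["halid"], doc["token_count"])
```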