albertvillanova (HF staff) committed
Commit ca93dc7
Parent: 8aa5ce8

Convert dataset to Parquet (#2)


- Convert dataset to Parquet (a4166a8dd6582e30366100bf993f4caa0dab2a65)
- Delete loading script (997b2fdbad440475d384c252fc3b0ae00a86bfd7)
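
After this conversion the splits are served straight from Parquet, so the dataset loads without executing any repo code. A minimal sketch, assuming the dataset's Hub repo id is `farsi_news` (substitute the actual path):

```python
from datasets import load_dataset

# Hypothetical repo id; replace with the dataset's actual Hub path.
ds = load_dataset("farsi_news")

# Split names come from the configs added to README.md in this commit.
print(ds["hamshahri"][0]["title"])
print(len(ds["radiofarda"]))  # 284 examples per the updated metadata
```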

README.md CHANGED
@@ -19,7 +19,6 @@ task_categories:
 task_ids:
 - language-modeling
 - masked-language-modeling
-paperswithcode_id: null
 pretty_name: FarsiNews
 dataset_info:
   features:
@@ -33,13 +32,20 @@ dataset_info:
     sequence: string
   splits:
   - name: hamshahri
-    num_bytes: 1267659
+    num_bytes: 1267639
     num_examples: 2203
   - name: radiofarda
-    num_bytes: 265272
+    num_bytes: 265252
     num_examples: 284
-  download_size: 1648337
-  dataset_size: 1532931
+  download_size: 708765
+  dataset_size: 1532891
+configs:
+- config_name: default
+  data_files:
+  - split: hamshahri
+    path: data/hamshahri-*
+  - split: radiofarda
+    path: data/radiofarda-*
 ---
 
 # Dataset Card for FarsiNews
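
The new `configs` block maps each split to its Parquet shards by glob pattern. A sketch of the equivalent explicit mapping, assuming you run it from a local clone of the repo so the relative globs resolve:

```python
from datasets import load_dataset

# Explicit counterpart of the configs block above, using the generic
# parquet builder against the same glob patterns.
ds = load_dataset(
    "parquet",
    data_files={
        "hamshahri": "data/hamshahri-*",
        "radiofarda": "data/radiofarda-*",
    },
)
print(ds)
```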
data/hamshahri-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d1ca64ec366814ba6902e0f2868fcfbce4e527356c7125f0651941a0c678b1e
+size 591058
data/radiofarda-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:02fcfcaed69a429c921215b9dbc28679de3d1fce5906ad522887d9ccae49323a
+size 117707
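
Both entries above are Git LFS pointer files: the repo tracks only the SHA-256 `oid` and byte `size`, while the actual Parquet bytes live in LFS storage. A small sketch to verify a fetched shard against its pointer (assumes the file has been pulled locally, e.g. via `git lfs pull`):

```python
import hashlib

def sha256_of(path: str) -> str:
    """Stream a file and return its hex SHA-256 digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# Should match the oid recorded in the pointer above.
print(sha256_of("data/radiofarda-00000-of-00001.parquet"))
```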
farsi_news.py DELETED
@@ -1,93 +0,0 @@
-# Copyright 2020 the HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""Farsi News Datasets: Hamshahri and RadioFarda"""
-
-
-import json
-
-import datasets
-
-
-_CITATION = """\
-"""
-
-_DESCRIPTION = """\
-Contains Farsi (Persian) datasets for Machine Learning tasks, particularly NLP.
-These datasets have been extracted from the RSS feed of two Farsi news agency websites:
-
-- Hamshahri
-- RadioFarda
-"""
-
-_URL = "https://raw.githubusercontent.com/sci2lab/Farsi-datasets/master/farsi_news/"
-_URLS = {
-    "hamshahri": _URL + "hamshahri.json",
-    "radiofarda": _URL + "radiofarda.json",
-}
-
-
-class FarsiNews(datasets.GeneratorBasedBuilder):
-    """Farsi News Datasets: Hamshahri and RadioFarda"""
-
-    VERSION = datasets.Version("1.0.0")
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
-            features=datasets.Features(
-                {
-                    "title": datasets.Value("string"),
-                    "summary": datasets.Value("string"),
-                    "link": datasets.Value("string"),
-                    "tags": datasets.features.Sequence(datasets.Value("string")),
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage="https://github.com/sci2lab/Farsi-datasets",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-        urls_to_download = _URLS
-        dl_dir = dl_manager.download_and_extract(urls_to_download)
-        return [
-            datasets.SplitGenerator(
-                name="hamshahri",
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": dl_dir["hamshahri"], "split": "hamshahri"},
-            ),
-            datasets.SplitGenerator(
-                name="radiofarda",
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": dl_dir["radiofarda"], "split": "radiofarda"},
-            ),
-        ]
-
-    def _generate_examples(self, filepath, split):
-        """Yields examples."""
-        with open(filepath, encoding="utf-8") as f:
-            data = json.load(f)
-        for id_, example in enumerate(data):
-            yield id_, example
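
For reference, the deleted builder only downloaded one JSON file per split and yielded its records in order. A sketch reproducing that without `datasets`, assuming the upstream files are still hosted at the URLs from the script:

```python
import json
import urllib.request

_URL = "https://raw.githubusercontent.com/sci2lab/Farsi-datasets/master/farsi_news/"

# Fetch each split's JSON exactly as the deleted script did.
for split in ("hamshahri", "radiofarda"):
    with urllib.request.urlopen(_URL + split + ".json") as resp:
        data = json.load(resp)
    print(split, len(data))
```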