arxyzan committed
Commit b9ecb35
1 Parent(s): 7653a03

Create lscp-500k.py

Files changed (1):
  1. lscp-500k.py +127 -0
lscp-500k.py ADDED
@@ -0,0 +1,127 @@
"""
A randomly sampled 500K portion of the original LSCP dataset (Enhanced Large Scale Colloquial Persian Language Understanding), provided by Hezar AI.
"""

import csv

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{abdi-khojasteh-etal-2020-lscp,
    title = "{LSCP}: Enhanced Large Scale Colloquial {P}ersian Language Understanding",
    author = "Abdi Khojasteh, Hadi and
      Ansari, Ebrahim and
      Bohlouli, Mahdi",
    booktitle = "Proceedings of the Twelfth Language Resources and Evaluation Conference",
    month = may,
    year = "2020",
    address = "Marseille, France",
    publisher = "European Language Resources Association",
    url = "https://aclanthology.org/2020.lrec-1.776",
    pages = "6323--6327",
    abstract = "Language recognition has been significantly advanced in recent years by means of modern machine learning methods such as deep learning and benchmarks with rich annotations. However, research is still limited in low-resource formal languages. This consists of a significant gap in describing the colloquial language especially for low-resourced ones such as Persian. In order to target this gap for low resource languages, we propose a {``}Large Scale Colloquial Persian Dataset{''} (LSCP). LSCP is hierarchically organized in a semantic taxonomy that focuses on multi-task informal Persian language understanding as a comprehensive problem. This encompasses the recognition of multiple semantic aspects in the human-level sentences, which naturally captures from the real-world sentences. We believe that further investigations and processing, as well as the application of novel algorithms and methods, can strengthen enriching computerized understanding and processing of low resource languages. The proposed corpus consists of 120M sentences resulted from 27M tweets annotated with parsing tree, part-of-speech tags, sentiment polarity and translation in five different languages.",
    language = "English",
    ISBN = "979-10-95546-34-4",
}
"""

_DESCRIPTION = """\
Language recognition has been significantly advanced in recent years by means of modern machine learning methods such as deep learning
and benchmarks with rich annotations. However, research is still limited in low-resource formal languages. This consists of a significant
gap in describing the colloquial language especially for low-resourced ones such as Persian. In order to target this gap for low resource languages,
we propose a “Large Scale Colloquial Persian Dataset” (LSCP). LSCP is hierarchically organized in a semantic taxonomy that focuses on
multi-task informal Persian language understanding as a comprehensive problem. This encompasses the recognition of multiple semantic aspects in the human-level sentences,
which naturally captures from the real-world sentences. We believe that further investigations and processing, as well as the application of novel algorithms and methods,
can strengthen enriching computerized understanding and processing of low resource languages. The proposed corpus consists of 120M sentences resulted from 27M tweets
annotated with parsing tree, part-of-speech tags, sentiment polarity and translation in five different languages.
"""

# Raw file links: `resolve/main` serves the file contents, whereas the original
# `blob/main` URLs point at the HTML page for each file.
_DOWNLOAD_URLS = {
    "train": "https://huggingface.co/datasets/hezarai/lscp-500k/resolve/main/lscp-500k_train.csv",
    "test": "https://huggingface.co/datasets/hezarai/lscp-500k/resolve/main/lscp-500k_test.csv",
}


class LSCP500KConfig(datasets.BuilderConfig):
    """BuilderConfig for LSCP-500K."""

    def __init__(self, **kwargs):
        """BuilderConfig for LSCP-500K.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(LSCP500KConfig, self).__init__(**kwargs)


class LSCP500K(datasets.GeneratorBasedBuilder):
    """LSCP-500K dataset."""

    BUILDER_CONFIGS = [
        LSCP500KConfig(
            name="lscp-500k",
            version=datasets.Version("1.0.0"),
            description="LSCP-500K dataset",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "ADJ",
                                "ADP",
                                "ADV",
                                "CCONJ",
                                "DET",
                                "INTJ",
                                "NOUN",
                                "NUM",
                                "PRON",
                                "PUNCT",
                                "VERB",
                                "X",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/hezarai/lscp-500k",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train and test CSV files."""
        train_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["train"])
        test_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["test"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields (id, example) pairs read from a split's CSV file."""
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(csv_file, quotechar='"', skipinitialspace=True)
            for id_, row in enumerate(csv_reader):
                tokens, pos_tags = row
                # NOTE: the two CSV columns are read as raw strings here, while the
                # features declared in _info() expect `tokens` and `pos_tags` to be
                # sequences; a split/deserialization step matching the CSV layout may
                # be needed. The "id" field is included so the yielded examples match
                # the declared features.
                yield id_, {"id": str(id_), "tokens": tokens, "pos_tags": pos_tags}
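
Once this script lives in the Hub repository, the splits can be loaded through the standard datasets API. A minimal usage sketch (assuming a recent datasets release, where executing a repository loading script must be opted into via trust_remote_code):

from datasets import load_dataset

# Load both splits defined by the loading script above.
dataset = load_dataset("hezarai/lscp-500k", trust_remote_code=True)
print(dataset)  # DatasetDict with "train" and "test" splits

# Inspect one example: tokens plus POS tags encoded as ClassLabel indices.
sample = dataset["train"][0]
print(sample["tokens"])
print(sample["pos_tags"])

# Map label indices back to their tag names (ADJ, ADP, ..., X).
pos_feature = dataset["train"].features["pos_tags"].feature
print([pos_feature.int2str(int(tag)) for tag in sample["pos_tags"]])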