"""
A randomly sampled 500K portion of the original LSCP dataset (Enhanced Large Scale Colloquial Persian Language Understanding) for part-of-speech tagging, provided by Hezar AI.
"""

import csv
from ast import literal_eval
import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{abdi-khojasteh-etal-2020-lscp,
    title = "{LSCP}: Enhanced Large Scale Colloquial {P}ersian Language Understanding",
    author = "Abdi Khojasteh, Hadi  and
      Ansari, Ebrahim  and
      Bohlouli, Mahdi",
    booktitle = "Proceedings of the Twelfth Language Resources and Evaluation Conference",
    month = may,
    year = "2020",
    address = "Marseille, France",
    publisher = "European Language Resources Association",
    url = "https://aclanthology.org/2020.lrec-1.776",
    pages = "6323--6327",
    abstract = "Language recognition has been significantly advanced in recent years by means of modern machine learning methods such as deep learning and benchmarks with rich annotations. However, research is still limited in low-resource formal languages. This consists of a significant gap in describing the colloquial language especially for low-resourced ones such as Persian. In order to target this gap for low resource languages, we propose a {``}Large Scale Colloquial Persian Dataset{''} (LSCP). LSCP is hierarchically organized in a semantic taxonomy that focuses on multi-task informal Persian language understanding as a comprehensive problem. This encompasses the recognition of multiple semantic aspects in the human-level sentences, which naturally captures from the real-world sentences. We believe that further investigations and processing, as well as the application of novel algorithms and methods, can strengthen enriching computerized understanding and processing of low resource languages. The proposed corpus consists of 120M sentences resulted from 27M tweets annotated with parsing tree, part-of-speech tags, sentiment polarity and translation in five different languages.",
    language = "English",
    ISBN = "979-10-95546-34-4",
}

"""

_DESCRIPTION = """\
Language recognition has been significantly advanced in recent years by means of modern machine learning methods such as deep learning 
and benchmarks with rich annotations. However, research is still limited in low-resource formal languages. This consists of a significant 
gap in describing the colloquial language especially for low-resourced ones such as Persian. In order to target this gap for low resource languages, 
we propose a “Large Scale Colloquial Persian Dataset” (LSCP). LSCP is hierarchically organized in a semantic taxonomy that focuses on 
multi-task informal Persian language understanding as a comprehensive problem. This encompasses the recognition of multiple semantic aspects in the human-level sentences, 
which naturally captures from the real-world sentences. We believe that further investigations and processing, as well as the application of novel algorithms and methods, 
can strengthen enriching computerized understanding and processing of low resource languages. The proposed corpus consists of 120M sentences resulted from 27M tweets 
annotated with parsing tree, part-of-speech tags, sentiment polarity and translation in five different languages.
"""

_DOWNLOAD_URLS = {
    "train": "https://huggingface.co./datasets/hezarai/lscp-pos-500k/resolve/main/lscp-pos-500k_train.csv",
    "test": "https://huggingface.co./datasets/hezarai/lscp-pos-500k/resolve/main/lscp-pos-500k_test.csv",
}
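
# A minimal sketch of the row layout these CSV files are assumed to follow, inferred
# from the parsing in `_generate_examples` below: a header row, then one data row per
# sentence with two columns, the token list and the POS-tag list, each stored as a
# Python list literal. The header names and sample values shown here are illustrative
# assumptions, not taken from the actual files.
#
#     tokens,pos_tags
#     "['example_token_1', 'example_token_2']","['N', 'V']"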


class LSCPPOS500KConfig(datasets.BuilderConfig):
    """BuilderConfig for LSCP-500K"""

    def __init__(self, **kwargs):
        """BuilderConfig for LSCP-500K.
        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class LSCPPOS500K(datasets.GeneratorBasedBuilder):
    """LSCP500K dataset."""

    BUILDER_CONFIGS = [
        LSCPPOS500KConfig(
            name="lscp-pos-500k",
            version=datasets.Version("1.0.0"),
            description="A randomly sampled 500K portion of the original LSCP dataset (Enhanced Large Scale Colloquial Persian Language Understanding) for part-of-speech tagging provided by Hezar AI.",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "P",
                                "Ne",
                                "PRO",
                                "CONJ",
                                "N",
                                "PUNC",
                                "Pe",
                                "ADV",
                                "V",
                                "AJ",
                                "AJe",
                                "DET",
                                "POSTP",
                                "NUM",
                                "DETe",
                                "NUMe",
                                "PROe",
                                "ADVe",
                                "RES",
                                "CL",
                                "INT",
                                "CONJe",
                                "RESe",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://huggingface.co./datasets/hezarai/lscp-pos-500k",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        train_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["train"])
        test_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["test"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields (id, example) pairs read from a split's CSV file."""
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(csv_file, quotechar='"', skipinitialspace=True)
            for id_, row in enumerate(csv_reader):
                # Skip the header row.
                if id_ < 1:
                    continue
                # Each data row holds two columns: the token list and the POS-tag list,
                # both serialized as Python list literals.
                tokens, pos_tags = row
                tokens = literal_eval(tokens)
                pos_tags = literal_eval(pos_tags)
                yield id_, {"tokens": tokens, "pos_tags": pos_tags}
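

# A usage sketch, not part of the builder itself: it assumes the `datasets` library is
# installed and the Hub repository "hezarai/lscp-pos-500k" is reachable. Depending on
# the `datasets` version, `trust_remote_code=True` may be required (or unsupported) for
# script-based builders like this one. The `__main__` guard keeps the example from
# running when the module is imported by the `datasets` loader.
if __name__ == "__main__":
    from datasets import load_dataset

    # Builds both splits defined in `_split_generators` above.
    dataset = load_dataset("hezarai/lscp-pos-500k", trust_remote_code=True)
    sample = dataset["train"][0]
    # Each example pairs a token sequence with its integer-encoded POS tag labels.
    print(sample["tokens"])
    print(sample["pos_tags"])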