File size: 2,580 Bytes
63f847c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
import json
import datasets


# BibTeX citation for the dataset (intentionally empty — TODO: fill in when available).
_CITATION = """\
"""

# License text for the dataset (intentionally empty — TODO: fill in when available).
_LICENSE = """\
"""

# Human-readable description surfaced through `datasets.DatasetInfo`.
_DESCRIPTION = """\
Review classification based on Yandex Georeview dataset.
"""

# Languages covered by this dataset, mapping ISO code -> English name.
_LANGUAGES = {
    "ru": "Russian"
}
_HOMEPAGE_URL = ""
# Template for per-split data files; `split` is one of "train"/"validation"/"test".
# NOTE(review): relative path — resolved by `dl_manager` against the dataset repo root.
_DOWNLOAD_URL = "{split}.jsonl"
_VERSION = "1.0.0"


class GeoreviewClassConfig(datasets.BuilderConfig):
    """BuilderConfig for GeoreviewClass.

    Args:
        languages: language code(s) this config covers (e.g. ``'ru'``).
        **kwargs: forwarded to ``datasets.BuilderConfig``.
    """

    def __init__(self, languages=None, **kwargs):
        # Fixed dataset version; kwargs carry name/description from BUILDER_CONFIGS.
        # (Original had a stray trailing comma here that made the statement a
        # one-element tuple expression — removed.)
        super().__init__(version=datasets.Version(_VERSION, ""), **kwargs)
        self.languages = languages


class GeoreviewClass(datasets.GeneratorBasedBuilder):
    """The Georeview Corpus: review classification based on Yandex Georeview."""

    BUILDER_CONFIGS = [
        GeoreviewClassConfig(
            name='ru',
            languages='ru',
            description="Review classification based on Yandex Georeview dataset",
        )
    ]
    BUILDER_CONFIG_CLASS = GeoreviewClassConfig
    DEFAULT_CONFIG_NAME = 'ru'

    def _info(self):
        """Return dataset metadata: feature schema, description, license, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "label": datasets.Value("int32"),
                    "label_text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            license=_LICENSE,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-split JSONL files and return one generator per split."""
        train_urls = [_DOWNLOAD_URL.format(split="train")]
        dev_urls = [_DOWNLOAD_URL.format(split="validation")]
        test_urls = [_DOWNLOAD_URL.format(split="test")]

        train_paths = dl_manager.download_and_extract(train_urls)
        dev_paths = dl_manager.download_and_extract(dev_urls)
        test_paths = dl_manager.download_and_extract(test_urls)
        # (Removed leftover debug `print` of the resolved paths.)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"file_paths": train_paths}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"file_paths": dev_paths}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"file_paths": test_paths}),
        ]

    def _generate_examples(self, file_paths):
        """Yield ``(key, example)`` pairs, one per JSON line across *file_paths*.

        Keys are a running row counter that continues across files so that
        every example in the split gets a unique key.
        """
        row_count = 0
        for file_path in file_paths:
            with open(file_path, "r", encoding="utf-8") as f:
                for line in f:
                    # Each line is a standalone JSON object matching the
                    # feature schema declared in `_info`.
                    yield row_count, json.loads(line)
                    row_count += 1