# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import csv
import os

import datasets

_CITATION = r"""\
@misc{11356/1486,
 title = {Multilingual comparable corpora of parliamentary debates {ParlaMint} 3.0},
 author = {Erjavec, Toma{\v z} and Kopp, Maty{\'a}{\v s} and Ogrodniczuk, Maciej and Osenova, Petya and Fi{\v s}er, Darja and Pirker, Hannes and Wissik, Tanja and Schopper, Daniel and Kirnbauer, Martin and Ljube{\v s}i{\'c}, Nikola and Rupnik, Peter and Mochtak, Michal and Pol, Henk van der and Depoorter, Griet and Simov, Kiril and Grigorova, Vladislava and Grigorov, Ilko and Jongejan, Bart and Haltrup Hansen, Dorte and Navarretta, Costanza and M{\"o}lder, Martin and Kahusk, Neeme and Vider, Kadri and Bel, Nuria and Antiba-Cartazo, Iv{\'a}n and Pisani, Marilina and Zevallos, Rodolfo and Vladu, Adina Ioana and Magari{\~n}os, Carmen and Bardanca, Daniel and Barcala, Mario and Garcia, Marcos and P{\'e}rez Lago, Mar{\'{\i}}a and Garc{\'{\i}}a Louzao, Pedro and Vivel Couso, Ainhoa and V{\'a}zquez Abu{\'{\i}}n, Marta and Garc{\'{\i}}a D{\'{\i}}az, Noelia and Vidal Migu{\'e}ns, Adri{\'a}n and Fern{\'a}ndez Rei, Elisa and Regueira, Xos{\'e} Lu{\'{\i}}s and Diwersy, Sascha and Luxardo, Giancarlo and Coole, Matthew and Rayson, Paul and Nwadukwe, Amanda and Gkoumas, Dimitris and Papavassiliou, Vassilis and Prokopidis, Prokopis and Gavriilidou, Maria and Piperidis, Stelios and Ligeti-Nagy, No{\'e}mi and Jelencsik-M{\'a}tyus, Kinga and Varga, Zs{\'o}fia and Dod{\'e}, R{\'e}ka and Barkarson, Starkaður and Agnoloni, Tommaso and Bartolini, Roberto and Frontini, Francesca and Montemagni, Simonetta and Quochi, Valeria and Venturi, Giulia and Ruisi, Manuela and Marchetti, Carlo and Battistoni, Roberto and Darģis, Roberts and van Heusden, Ruben and Marx, Maarten and Tungland, Lars Magne and Rudolf, Micha{\l} and Nito{\'n}, Bart{\l}omiej and Aires, Jos{\'e} and Mendes, Am{\'a}lia and Cardoso, Aida and Pereira, Rui and Yrj{\"a}n{\"a}inen, V{\"a}in{\"o} and Nor{\'e}n, Fredrik Mohammadi and Magnusson, M{\aa}ns and Jarlbrink, Johan and Meden, Katja and Pan{\v c}ur, Andrej and Ojster{\v s}ek, Mihael and {\c C}{\"o}ltekin, {\c C}a{\u g}r{\i} and Kryvenko, Anna},
 url = {http://hdl.handle.net/11356/1486},
 note = {Slovenian language resource repository {CLARIN}.{SI}},
 copyright = {Creative Commons - Attribution 4.0 International ({CC} {BY} 4.0)},
 issn = {2820-4042},
 year = {2023}
}
"""

_DESCRIPTION = """\
ParlaMint 3.0 is a multilingual set of 26 comparable corpora of parliamentary debates, mostly starting in 2015 and extending to mid-2022.
The corpora have extensive metadata covering the parliament and the speakers (name, gender, MP status, party affiliation, party coalition/opposition status).
They are structured into time-stamped terms, sessions and meetings, with each speech marked by its speaker and their role (e.g. chair, regular speaker).
The speeches also contain marked-up transcriber comments, such as gaps in the transcription, interruptions, applause, etc.
Some corpora carry further information, e.g. the speakers' year of birth, links to their Wikipedia articles, or their membership in various committees.
Speeches are also marked with the subcorpus they belong to: "reference" (until 2020-01-30), "covid" (from 2020-01-31) and "war" (from 2022-02-24).
The corpora follow the Parla-CLARIN TEI recommendation (https://clarin-eric.github.io/parla-clarin/), but are encoded against the compatible,
much stricter ParlaMint encoding guidelines (https://clarin-eric.github.io/ParlaMint/) and schemas (included in the distribution).
The source entry contains the TEI-encoded corpora, the derived plain-text versions and TSV metadata of the speeches,
as well as the 3.0 release of the data and scripts available at the GitHub repository of the ParlaMint project.
This dataset contains only the Slovenian parliamentary debates (ParlaMint-SI).
"""


_HOMEPAGE = "http://hdl.handle.net/11356/1486"

_LICENSE = "Creative Commons - Attribution 4.0 International (CC BY 4.0)"

_URLS = {
    "parlamint": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1486/ParlaMint-SI.tgz?sequence=24&isAllowed=y",
}


class ParlaMintSI(datasets.GeneratorBasedBuilder):
    """This dataset contains transcriptions of Slovenian parliamentary debates and relevant metadata."""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        features = datasets.Features(
            {
                "ID": datasets.Value("string"),
                "Title": datasets.Value("string"),
                "Date": datasets.Value("string"),
                "Body": datasets.Value("string"),
                "Term": datasets.Value("string"),
                "Session": datasets.Value("string"),
                "Meeting": datasets.Value("int32"),
                "Sitting": datasets.Value("string"),
                "Agenda": datasets.Value("string"),
                "Subcorpus": datasets.Value("string"),
                "Speaker_role": datasets.Value("string"),
                "Speaker_MP": datasets.Value("string"),
                "Speaker_Minister": datasets.Value("string"),
                "Speaker_party": datasets.Value("string"),
                "Speaker_party_name": datasets.Value("string"),
                "Party_status": datasets.Value("string"),
                "Speaker_name": datasets.Value("string"),
                "Speaker_gender": datasets.Value("string"),
                "Speaker_birth": datasets.Value("string"),
                "text": datasets.Value("string")
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS["parlamint"]
        download_path = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": download_path,
                },
            ),
        ]

    def _generate_examples(self, filepath):
        filepath = os.path.join(filepath, "ParlaMint-SI.txt")
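        # Expected layout of the extracted archive (inferred from the loop below, not
        # specified elsewhere in this file): one sub-directory per year, each holding
        # "*-meta.tsv" files with per-speech metadata and matching "*.txt" files whose
        # lines are "ID<TAB>speech text".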

        for year_dir in os.listdir(filepath):
            year_path = os.path.join(filepath, year_dir)
            if os.path.isdir(year_path):
                tsv_files = [f for f in os.listdir(year_path) if f.endswith(".tsv")]
                for tsv_file in tsv_files:
                    tsv_path = os.path.join(year_path, tsv_file)
                    txt_path = os.path.join(year_path, tsv_file.replace("-meta.tsv", ".txt"))

                    with open(tsv_path, "r", encoding="utf-8") as tsv, open(txt_path, "r", encoding="utf-8") as txt:
                        tsv_reader = csv.DictReader(tsv, delimiter="\t")
                        # Map speech IDs to their text once per file (each line is "ID<TAB>text"),
                        # instead of rescanning the whole file for every metadata row.
                        id_to_text = {}
                        for line in txt:
                            parts = line.rstrip("\n").split("\t", 1)
                            if len(parts) == 2:
                                id_to_text[parts[0]] = parts[1]

                        for row in tsv_reader:
                            id_ = row.get("ID", "")
                            example = {key: row.get(key, "") for key in row}
                            # Attach the speech text matching this metadata row; empty string if missing.
                            example["text"] = id_to_text.get(id_, "")
                            yield id_, example
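

# A minimal usage sketch, for illustration only. The Hub identifier used below
# ("cjvt/parlamint_si") is an assumption, not something this file specifies;
# substitute the actual repository id or a local path to this script. Recent
# versions of the datasets library may also require trust_remote_code=True to
# run script-based datasets.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("cjvt/parlamint_si", split="train", trust_remote_code=True)
    print(ds)
    # Each example combines one metadata row with its speech text.
    print(ds[0]["ID"], ds[0]["Date"], ds[0]["Speaker_name"])
    print(ds[0]["text"][:200])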