import csv
import json
import os

import datasets
|
|
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""
|
|
_DESCRIPTION = """\
ParlaMint 3.0 is a multilingual set of 26 comparable corpora containing parliamentary debates, mostly starting in 2015 and extending to mid-2022.

The corpora have extensive metadata, including aspects of the parliament and of the speakers (name, gender, MP status, party affiliation, party coalition/opposition).
They are structured into time-stamped terms, sessions and meetings, with speeches marked by their speaker and the speaker's role (e.g. chair, regular speaker).
The speeches also contain marked-up transcriber comments, such as gaps in the transcription, interruptions, applause, etc.
Note that some corpora include further information, e.g. the speakers' year of birth, links to their Wikipedia articles, their membership in various committees, etc.
The corpora are also marked with the subcorpus they belong to ("reference" until 2020-01-30, "covid" from 2020-01-31, and "war" from 2022-02-24).

The corpora are encoded according to the Parla-CLARIN TEI recommendation (https://clarin-eric.github.io/parla-clarin/), but have been validated against the compatible,
but much stricter ParlaMint encoding guidelines (https://clarin-eric.github.io/ParlaMint/) and schemas (included in the distribution).

This entry contains the TEI-encoded ParlaMint corpora, the derived plain-text versions of the corpora, and the TSV metadata of the speeches.
Also included is the 3.0 release of the data and scripts available at the GitHub repository of the ParlaMint project.

This dataset contains only the Slovenian parliamentary debates (ParlaMint-SI).
"""
|
|
_HOMEPAGE = "http://hdl.handle.net/11356/1486"

_LICENSE = "Creative Commons - Attribution 4.0 International (CC BY 4.0)"

_URLS = {
    "parlamint": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1486/ParlaMint-SI.tgz?sequence=24&isAllowed=y",
}
|
|
class ParlaMintSI(datasets.GeneratorBasedBuilder):
    """This dataset contains transcriptions of Slovenian parliamentary debates and relevant metadata."""

    VERSION = datasets.Version("1.1.0")
|
    def _info(self):
        features = datasets.Features(
            {
                "ID": datasets.Value("string"),
                "Title": datasets.Value("string"),
                "Date": datasets.Value("string"),
                "Body": datasets.Value("string"),
                "Term": datasets.Value("string"),
                "Session": datasets.Value("string"),
                # Kept as a string: the generator passes TSV values through verbatim.
                "Meeting": datasets.Value("string"),
                "Sitting": datasets.Value("string"),
                "Agenda": datasets.Value("string"),
                "Subcorpus": datasets.Value("string"),
                "Speaker_role": datasets.Value("string"),
                "Speaker_MP": datasets.Value("string"),
                "Speaker_Minister": datasets.Value("string"),
                "Speaker_party": datasets.Value("string"),
                "Speaker_party_name": datasets.Value("string"),
                "Party_status": datasets.Value("string"),
                "Speaker_name": datasets.Value("string"),
                "Speaker_gender": datasets.Value("string"),
                "Speaker_birth": datasets.Value("string"),
                "text": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
|
    def _split_generators(self, dl_manager):
        urls = _URLS["parlamint"]
        download_path = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": download_path,
                },
            ),
        ]
|
    def _generate_examples(self, filepath):
        # The extracted archive contains a "ParlaMint-SI.txt" directory with one
        # sub-directory per year, each holding paired "*-meta.tsv" and "*.txt" files.
        filepath = os.path.join(filepath, "ParlaMint-SI.txt")

        for year_dir in os.listdir(filepath):
            year_path = os.path.join(filepath, year_dir)
            if os.path.isdir(year_path):
                tsv_files = [f for f in os.listdir(year_path) if f.endswith(".tsv")]
                for tsv_file in tsv_files:
                    tsv_path = os.path.join(year_path, tsv_file)
                    txt_path = os.path.join(year_path, tsv_file.replace("-meta.tsv", ".txt"))

                    with open(tsv_path, "r", encoding="utf-8") as tsv, open(txt_path, "r", encoding="utf-8") as txt:
                        tsv_reader = csv.DictReader(tsv, delimiter="\t")
                        # Each line of the plain-text file is "<speech ID>\t<speech text>".
                        # Index the speeches by ID so that every metadata row is matched
                        # exactly rather than by prefix (which could confuse IDs such as
                        # "...u1" and "...u10").
                        texts = {}
                        for line in txt:
                            parts = line.rstrip("\n").split("\t", 1)
                            if len(parts) == 2:
                                texts[parts[0]] = parts[1]

                        for row in tsv_reader:
                            id_ = row.get("ID", "")
                            example = {key: row.get(key, "") for key in row}
                            example["text"] = texts.get(id_, "")
                            yield id_, example
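# A minimal usage sketch (not part of the loader itself): it assumes this script is
# saved locally as "ParlaMint-SI.py"; the local path and inspected fields are
# illustrative, and recent versions of the `datasets` library may additionally
# require `trust_remote_code=True` for script-based datasets.
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("ParlaMint-SI.py", split="train")
#     print(dataset[0]["Speaker_name"], dataset[0]["Date"])
#     print(dataset[0]["text"][:200])
#
#     # Keep only speeches from the "covid" subcorpus (2020-01-31 onwards).
#     covid = dataset.filter(lambda example: example["Subcorpus"] == "covid")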
|
|