Dataset: muchocine
Tasks: Text Classification
Sub-tasks: sentiment-classification
Languages: Spanish
Size: 1K<n<10K
License: CC-BY-2.1
File size: 3,885 Bytes
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import re
from xml.dom.minidom import parseString
import datasets
# no BibTeX citation
_CITATION = ""
_DESCRIPTION = """\
The Muchocine reviews dataset contains 3,872 long-form movie reviews in Spanish,
each with a shorter summary review and a rating on a 1-5 scale.
"""
_LICENSE = "CC-BY-2.1"
_URLs = {"default": "http://www.lsi.us.es/~fermin/corpusCine.zip"}
class Muchocine(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.1.1")
def _info(self):
features = datasets.Features(
{
"review_body": datasets.Value("string"),
"review_summary": datasets.Value("string"),
"star_rating": datasets.ClassLabel(names=[str(i) for i in range(1, 6)]),
}
)
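        # star_rating is a 1-5 rating stored as a ClassLabel: examples hold integer indices 0-4,
        # which map back to the label names "1" through "5".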
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage="http://www.lsi.us.es/~fermin/index.php/Datasets",
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
my_urls = _URLs[self.config.name]
data_dir = dl_manager.download_and_extract(my_urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepaths": sorted(glob.glob(os.path.join(data_dir, "corpusCriticasCine", "*.xml"))),
"split": "train",
},
),
]
def _generate_examples(self, filepaths, split):
for filepath in filepaths:
with open(filepath, encoding="latin-1") as f:
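                # Use the numeric part of the file name (e.g. "1234.xml" -> "1234") as the example key.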
id = re.search(r"\d+\.xml", filepath)[0][:-4]
txt = f.read()
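                # Normalize HTML entities and stray ampersands in the latin-1 encoded files before XML parsing.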
                txt = txt.replace("&ldquo;", '"').replace("&rdquo;", '"').replace("&hellip;", "")
                txt = txt.replace("&lsquo;", '"').replace("&rsquo;", '"').replace("&prime;", "")
                txt = txt.replace("&agrave;", "à").replace("&ndash;", "-").replace("&egrave;", "è")
                txt = txt.replace("&ouml;", "ö").replace("&ccedil;", "ç").replace("&", "and")
                try:
                    doc = parseString(txt)
                except Exception:
                    # skip 6 malformed xml files, for example unescaped < and >
                    continue
btxt = ""
review_bod = doc.getElementsByTagName("body")
if len(review_bod) > 0:
for node in review_bod[0].childNodes:
if node.nodeType == node.TEXT_NODE:
btxt += node.data + " "
rtxt = ""
review_summ = doc.getElementsByTagName("summary")
if len(review_summ) > 0:
for node in review_summ[0].childNodes:
if node.nodeType == node.TEXT_NODE:
rtxt += node.data + " "
yield id, {
"review_body": btxt,
"review_summary": rtxt,
"star_rating": doc.documentElement.attributes["rank"].value,
}
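
For reference, a minimal sketch of loading and inspecting the resulting dataset with the datasets library; the Hub name "muchocine" is an assumption based on this script's builder class, and the single train split comes from _split_generators above.

from datasets import load_dataset

# Assumes this script is registered on the Hub under the name "muchocine".
ds = load_dataset("muchocine", split="train")

example = ds[0]
print(example["review_summary"])

# star_rating is stored as an integer ClassLabel index 0-4;
# int2str() maps it back to the original "1"-"5" label names.
print(ds.features["star_rating"].int2str(example["star_rating"]))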