multilingual-sentiments / multilingual_sentiments.py
import csv
import logging
from typing import List

import datasets

_DESCRIPTION = """\
A collection of multilingual 3-class sentiment (positive, neutral, negative) datasets.
Most multilingual sentiment datasets are either 5-class product-review ratings (e.g. the Amazon multilingual reviews dataset) or do not include Asian languages (e.g. Malay).
"""


class MultilingualSentimentsConfig(datasets.BuilderConfig):
    """BuilderConfig for Multilingual Sentiments."""

    def __init__(
        self,
        features,
        train_url,
        valid_url,
        test_url,
        citation=None,
        **kwargs,
    ):
        """BuilderConfig for Multilingual Sentiments.

        Args:
          features: `list[string]`, names of the columns (text field and label)
            in the tsv files.
          train_url: `string`, url to download the train file from.
          valid_url: `string`, url to download the validation file from.
          test_url: `string`, url to download the test file from.
          citation: `string`, citation for the dataset.
          **kwargs: keyword arguments forwarded to super.
        """
        super(MultilingualSentimentsConfig, self).__init__(
            version=datasets.Version("1.0.0"), **kwargs
        )
        self.features = features
        self.train_url = train_url
        self.valid_url = valid_url
        self.test_url = test_url
        self.citation = citation


class MultilingualSentiments(datasets.GeneratorBasedBuilder):
    """Multilingual Sentiments dataset loading script."""

    BUILDER_CONFIGS = [
        MultilingualSentimentsConfig(
            name="en",
            description="English sentiment analysis dataset",
            features=["text", "label"],
            train_url="https://raw.githubusercontent.com/tyqiangz/multilingual-sentiment-datasets/main/en/train.tsv",
            valid_url="https://raw.githubusercontent.com/tyqiangz/multilingual-sentiment-datasets/main/en/val.tsv",
            test_url="https://raw.githubusercontent.com/tyqiangz/multilingual-sentiment-datasets/main/en/test.tsv",
        ),
        MultilingualSentimentsConfig(
            name="id",
            description="Indonesian sentiment analysis dataset",
            features=["text", "label"],
            train_url="https://raw.githubusercontent.com/tyqiangz/multilingual-sentiment-datasets/main/id/train.tsv",
            valid_url="https://raw.githubusercontent.com/tyqiangz/multilingual-sentiment-datasets/main/id/val.tsv",
            test_url="https://raw.githubusercontent.com/tyqiangz/multilingual-sentiment-datasets/main/id/test.tsv",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION + self.config.description,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.Value("string"),
                }
            ),
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        train_path = dl_manager.download_and_extract(self.config.train_url)
        valid_path = dl_manager.download_and_extract(self.config.valid_url)
        test_path = dl_manager.download_and_extract(self.config.test_url)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": valid_path}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
        ]

    def _generate_examples(self, filepath):
        """Yields (id, example) tuples read from a tab-separated file."""
        logging.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
            for id_, row in enumerate(reader):
                text, label = row
                yield id_, {"text": text, "label": label}
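

# A minimal usage sketch, not part of the loading script itself. It assumes the
# script is published on the Hugging Face Hub under the repo id
# "tyqiangz/multilingual-sentiments" (the repo id is not stated in this file);
# the config name ("en" or "id") selects the language.
#
#   from datasets import load_dataset
#
#   dataset = load_dataset("tyqiangz/multilingual-sentiments", "en")
#   print(dataset["train"][0])  # e.g. {"text": "...", "label": "positive"}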