import csv
import logging
from typing import List

import datasets
_DESCRIPTION = """\
A collection of multilingual sentiment datasets with three classes (positive, neutral, negative).
Most existing multilingual sentiment datasets are either 5-class ratings of product reviews (e.g. the Amazon multilingual reviews dataset) or do not include Asian languages such as Malay.
"""
class MultilingualSentimentsConfig(datasets.BuilderConfig):
    """BuilderConfig for Multilingual Sentiments."""

    def __init__(
        self,
        features,
        train_url,
        valid_url,
        test_url,
        citation=None,
        **kwargs,
    ):
        """BuilderConfig for Multilingual Sentiments.

        Args:
          features: `list[string]`, names of the feature columns
            ("text" and "label") in the tsv files
          train_url: `string`, url to download the train file from
          valid_url: `string`, url to download the validation file from
          test_url: `string`, url to download the test file from
          citation: `string`, citation for the dataset
          **kwargs: keyword arguments forwarded to super.
        """
        super(MultilingualSentimentsConfig, self).__init__(
            version=datasets.Version("1.0.0"), **kwargs
        )
        self.features = features
        self.train_url = train_url
        self.valid_url = valid_url
        self.test_url = test_url
        self.citation = citation
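
# Illustrative sketch (not part of the original script): a config for another
# language would be declared in the same way as the entries in BUILDER_CONFIGS
# below. The "ms" name and the URLs here are placeholders for illustration only,
# not files known to exist in the source repository.
#
# example_ms_config = MultilingualSentimentsConfig(
#     name="ms",
#     description="Malay sentiment analysis dataset (hypothetical example)",
#     features=["text", "label"],
#     train_url="https://example.com/ms/train.tsv",  # placeholder URL
#     valid_url="https://example.com/ms/val.tsv",    # placeholder URL
#     test_url="https://example.com/ms/test.tsv",    # placeholder URL
# )
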
class MultilingualSentiments(datasets.GeneratorBasedBuilder):
    """Multilingual 3-class sentiment analysis dataset."""

    BUILDER_CONFIGS = [
        MultilingualSentimentsConfig(
            name="en",
            description="English sentiment analysis dataset",
            features=["text", "label"],
            train_url="https://raw.githubusercontent.com/tyqiangz/multilingual-sentiment-datasets/main/en/train.tsv",
            valid_url="https://raw.githubusercontent.com/tyqiangz/multilingual-sentiment-datasets/main/en/val.tsv",
            test_url="https://raw.githubusercontent.com/tyqiangz/multilingual-sentiment-datasets/main/en/test.tsv",
        ),
        MultilingualSentimentsConfig(
            name="id",
            description="Indonesian sentiment analysis dataset",
            features=["text", "label"],
            train_url="https://raw.githubusercontent.com/tyqiangz/multilingual-sentiment-datasets/main/id/train.tsv",
            valid_url="https://raw.githubusercontent.com/tyqiangz/multilingual-sentiment-datasets/main/id/val.tsv",
            test_url="https://raw.githubusercontent.com/tyqiangz/multilingual-sentiment-datasets/main/id/test.tsv",
        ),
    ]
    def _info(self):
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.Value("string"),
                }
            ),
            citation=self.config.citation or "",
        )
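
    # Design note (not in the original script): since the dataset is described as
    # 3-class (positive, neutral, negative), the "label" column could alternatively
    # be declared as a ClassLabel so labels are encoded as integers. Sketch only;
    # it assumes the tsv label values are exactly these three strings.
    #
    #     features=datasets.Features({
    #         "text": datasets.Value("string"),
    #         "label": datasets.ClassLabel(names=["positive", "neutral", "negative"]),
    #     })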
    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        train_path = dl_manager.download_and_extract(self.config.train_url)
        valid_path = dl_manager.download_and_extract(self.config.valid_url)
        test_path = dl_manager.download_and_extract(self.config.test_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": valid_path}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}
            ),
        ]
    def _generate_examples(self, filepath):
        logging.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
            for id_, row in enumerate(reader):
                # Each tsv row is expected to hold exactly two columns:
                # the text and its sentiment label.
                text, label = row
                yield id_, {"text": text, "label": label}
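

# Minimal usage sketch (not part of the original script): loading one of the
# configurations defined above with `datasets.load_dataset`. This assumes the
# installed `datasets` version can run local loading scripts and that the
# GitHub raw URLs above are reachable.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, name="en")
    print(dataset)              # splits: train / validation / test
    print(dataset["train"][0])  # e.g. {"text": "...", "label": "..."}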