import json
import os
import datasets

_CITATION = """\
@inproceedings{Kumar2022IndicNLGSM,
    title={IndicNLG Suite: Multilingual Datasets for Diverse NLG Tasks in Indic Languages},
    author={Aman Kumar and Himani Shrotriya and Prachi Sahu and Raj Dabre and Ratish Puduppully and Anoop Kunchukuttan and Amogh Mishra and Mitesh M. Khapra and Pratyush Kumar},
    year={2022},
    url={https://arxiv.org/abs/2203.05437}
}
"""

_DESCRIPTION = """\
This is the paraphrasing dataset released as part of the IndicNLG Suite. Each
input is paired with up to 5 references. The dataset covers eleven languages:
as, bn, gu, hi, kn, ml, mr, or, pa, ta, te. The total size of the dataset is
5.57M examples.
"""

_HOMEPAGE = "https://indicnlp.ai4bharat.org/indicnlg-suite"

_LICENSE = "Creative Commons Attribution-NonCommercial 4.0 International Public License"

_URL = "https://huggingface.co/datasets/ai4bharat/IndicParaphrase/resolve/main/data/{}_IndicParaphrase_v{}.zip"
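# For the "hi" config at version 1.0, for example, the template resolves to:
# https://huggingface.co/datasets/ai4bharat/IndicParaphrase/resolve/main/data/hi_IndicParaphrase_v1.0.zip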

_LANGUAGES = [
"as",
"bn",
"gu",
"hi",
"kn",
"ml",
"mr",
"or",
"pa",
"ta",
"te"
]


class IndicParaphrase(datasets.GeneratorBasedBuilder):
    """Paraphrase generation dataset of the IndicNLG Suite, one config per language."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=lang,
            version=datasets.Version("1.0.0"),
        )
        for lang in _LANGUAGES
    ]

    def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"pivot": datasets.Value("string"),
"input": datasets.Value("string"),
"target": datasets.Value("string"),
"references": [datasets.Value("string")]
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
license=_LICENSE,
version=self.VERSION,
)
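    # Illustrative shape of a single record under this schema (field values
    # are invented for the example):
    # {"id": "0", "pivot": "...", "input": "...", "target": "...",
    #  "references": ["...", "..."]}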

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        lang = str(self.config.name)
        # version_str is "1.0.0"; dropping the trailing ".0" gives "1.0",
        # matching the archive names (e.g. hi_IndicParaphrase_v1.0.zip).
        url = _URL.format(lang, self.VERSION.version_str[:-2])
        data_dir = dl_manager.download_and_extract(url)
        # Assamese (as) has no train split in this release; only dev and
        # test splits are generated for it.
        if lang == "as":
return [
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": os.path.join(data_dir, "test_" + lang + ".jsonl"),
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepath": os.path.join(data_dir, "dev_" + lang + ".jsonl"),
},
),
]
        else:
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": os.path.join(data_dir, "train_" + lang + ".jsonl"),
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": os.path.join(data_dir, "test_" + lang + ".jsonl"),
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepath": os.path.join(data_dir, "dev_" + lang + ".jsonl"),
},
),
]

    def _generate_examples(self, filepath):
        """Yields examples as (key, example) tuples."""
        # Each split file is in JSON Lines format: one example object per line.
        with open(filepath, encoding="utf-8") as f:
            for idx_, row in enumerate(f):
                data = json.loads(row)
yield idx_, {
"id": data["id"],
"pivot": data["pivot"],
"input": data["input"],
"target": data["target"],
"references": data["references"]
}
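

# A minimal usage sketch, not part of the loading script itself: the builder
# is normally driven through `datasets.load_dataset`. The local path and the
# "hi" config are illustrative, and recent versions of `datasets` may require
# `trust_remote_code=True` (or no longer support script-based loading at all).
if __name__ == "__main__":
    dset = datasets.load_dataset("./IndicParaphrase.py", "hi")
    # Each example carries an id, a pivot sentence, the input, the target
    # paraphrase, and the list of references.
    print(dset["validation"][0])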