Datasets:

Modalities:
Text
Languages:
English
ArXiv:
Libraries:
Datasets
License:
utcd / utcd.py
StefanH's picture
Update: UTCD column types
62391ed
"""Universal Text Classification Dataset (UTCD)"""
import os
import json
from os.path import join as os_join
from typing import List
import datasets
from huggingface_hub import hf_hub_download
# Human-readable summary surfaced as `DatasetInfo.description` on the hub.
_DESCRIPTION = """
UTCD is a compilation of 18 classification datasets spanning 3 categories of Sentiment,
Intent/Dialogue and Topic classification. UTCD focuses on the task of zero-shot text classification where the
candidate labels are descriptive of the text being classified. UTCD consists of ~ 6M/800K train/test examples.
"""
# TODO: citation
# Project homepage, used as `DatasetInfo.homepage` below.
_URL = "https://github.com/ChrisIsKing/zero-shot-text-classification/tree/master"
# Zipped dataset files on the hub; NOTE(review): not referenced elsewhere in this script
# (`_split_generators` downloads the relative path 'datasets.zip' instead) — confirm intent.
_URL_ZIP = "https://huggingface.co./datasets/claritylab/UTCD/raw/main/datasets.zip"
# Single version shared by every builder config and the builder itself.
_VERSION = datasets.Version('0.0.1')
class UtcdConfig(datasets.BuilderConfig):
    """BuilderConfig for UTCD."""
    def __init__(self, domain: str, normalize_aspect: bool = False, **kwargs):
        """BuilderConfig for UTCD.

        Args:
            domain: `string`, dataset domain, one of [`in`, `out`].
            normalize_aspect: `bool`, if True, an aspect-normalized version of the dataset is returned.
            **kwargs: keyword arguments forwarded to super.

        Raises:
            ValueError: if `domain` is not one of [`in`, `out`].
        """
        # Version history:
        # 0.0.1: Initial version.
        super().__init__(version=_VERSION, **kwargs)
        # validate explicitly rather than with `assert`, which is stripped under `python -O`
        if domain not in ('in', 'out'):
            raise ValueError(f"Invalid domain: expected one of ['in', 'out'], got {domain!r}")
        self.domain = domain
        self.normalize_aspect = normalize_aspect

    def to_dir_name(self) -> str:
        """
        :return: directory name for the dataset files for this config stored on hub
        """
        domain_str = 'in-domain' if self.domain == 'in' else 'out-of-domain'
        prefix = 'aspect-normalized-' if self.normalize_aspect else ''
        return f'{prefix}{domain_str}'
# for getting dataset viewer working on hub, don't have write access to /.cache
# Fetch the dataset metadata file from the hub, caching it next to this script.
config_fnm = hf_hub_download(
    repo_id='claritylab/utcd', filename='_utcd_info.json', cache_dir=os.path.dirname(__file__), repo_type='dataset'
)
# _config: dict keyed by dataset name; each entry carries at least 'domain' and 'aspect'
# (see usages below); presumably also per-split info — verify against _utcd_info.json.
with open(config_fnm) as f:
    _config = json.load(f)
# Map this repo's split names to the canonical HF splits; 'eval' is the validation split.
_split2hf_split = dict(train=datasets.Split.TRAIN, eval=datasets.Split.VALIDATION, test=datasets.Split.TEST)
class Utcd(datasets.GeneratorBasedBuilder):
    """UTCD: Universal Text Classification Dataset. Version 0.0.1."""
    VERSION = _VERSION
    BUILDER_CONFIGS = [
        UtcdConfig(
            name='in-domain',
            description='All in-domain datasets.',
            domain='in',
            normalize_aspect=False
        ),
        UtcdConfig(
            name='aspect-normalized-in-domain',
            description='Aspect-normalized version of all in-domain datasets.',
            domain='in',
            normalize_aspect=True
        ),
        UtcdConfig(
            name='out-of-domain',
            description='All out-of-domain datasets.',
            domain='out',
            normalize_aspect=False
        ),
        UtcdConfig(
            name='aspect-normalized-out-of-domain',
            description='Aspect-normalized version of all out-of-domain datasets.',
            domain='out',
            normalize_aspect=True
        )
    ]
    DEFAULT_CONFIG_NAME = 'in-domain'

    def _get_dataset_names(self) -> List[str]:
        """Return the names of all datasets belonging to this config's domain."""
        return [dnm for dnm, d_dset in _config.items() if d_dset['domain'] == self.config.domain]

    def _info(self) -> datasets.DatasetInfo:
        """Declare the feature schema: raw text, string labels, dataset name & aspect class labels."""
        dnms = self._get_dataset_names()
        # aspect vocabulary spans *all* datasets (both domains), not just this config's
        aspects = sorted({d['aspect'] for d in _config.values()})  # drop duplicate aspects
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                text=datasets.Value(dtype='string'),
                # labels stay plain strings (not ClassLabel): label sets differ per dataset,
                # and an example may carry multiple labels (multi-label)
                labels=datasets.Sequence(feature=datasets.Value(dtype='string'), length=-1),
                dataset_name=datasets.ClassLabel(names=dnms),
                aspect=datasets.ClassLabel(names=aspects)
            ),
            homepage=_URL
            # TODO: citation
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download & extract the data archive and build one SplitGenerator per split."""
        # only the aspect-normalized versions of the dataset include a validation ('eval') set
        splits = ['train', 'eval', 'test'] if self.config.normalize_aspect else ['train', 'test']
        dnms = self._get_dataset_names()
        dir_nm = self.config.to_dir_name()
        # TODO: update root dataset naming version & dataset split naming
        base_path = dl_manager.download_and_extract('datasets.zip')
        # order of dataset file paths will be deterministic for deterministic dataset name ordering
        split2paths = {s: [os_join(base_path, f'{dir_nm}_split', dnm, f'{s}.json') for dnm in dnms] for s in splits}
        return [
            datasets.SplitGenerator(name=_split2hf_split[s], gen_kwargs=dict(filepath=split2paths[s])) for s in splits
        ]

    def _generate_examples(self, filepath: List[str]):
        """Yield (id, example) pairs from the given files, one JSON file per dataset for this split.

        Each file maps text => labels; examples are tagged with their source dataset and aspect.
        """
        id_ = 0
        for path in filepath:  # each file for one split of one dataset
            # dataset name is the parent directory: .../<dir_nm>_split/<dnm>/<split>.json
            dnm = os.path.basename(os.path.dirname(path))
            aspect = _config[dnm]['aspect']
            with open(path, encoding='utf-8') as fl:
                dset = json.load(fl)
            for txt, labels in dset.items():
                yield id_, dict(text=txt, labels=labels, dataset_name=dnm, aspect=aspect)
                id_ += 1