|
import datasets

import logging

import csv

import sys

from csv import DictReader

# Story fields can be arbitrarily long; lift csv's default per-field cap
# (128 KiB) to the platform maximum so DictReader doesn't raise on big rows.
csv.field_size_limit(sys.maxsize)



logger = logging.getLogger(__name__)
|
|
|
class FFV4Config(datasets.BuilderConfig):
    """BuilderConfig for the FFV4 CSV-backed datasets."""

    def __init__(self, filename: str, info: str, **kwargs):
        """BuilderConfig for FFV4.

        Args:
            filename: *string*, csv file holding this variant of the dataset.
            info: *string*, human-readable description of how this variant
                was filtered/produced.
            **kwargs: keyword arguments forwarded to super.
        """
        # All configs share a single version; bump here when the CSVs change.
        super().__init__(version=datasets.Version("0.0.1"), **kwargs)
        self.filename = filename
        self.info = info
|
|
|
class FFV4(datasets.GeneratorBasedBuilder):
    """Builder for the FFV4 story corpus ("Garbage datasets for LLM training").

    Each config points at a local CSV file produced by the V4 ffarchive
    notebook with a particular filter; every row becomes one example with
    'id', 'header' and 'story' columns.
    """

    BUILDER_CONFIGS = [
        FFV4Config(
            name="notebook_defaults",
            filename="notebook_defaults.csv",
            info="the result of using the default values in the V4 ffarchive notebook, except without the TS/RD filter",
        ),
        FFV4Config(
            name="notebook_defaults_ratio0.8_likes10",
            filename="ratio0.8_likes10.csv",
            info="default filter, but with the score filter replaced with '.ratio > 0.8, .likes > 10'",
        ),
    ]
    DEFAULT_CONFIG_NAME = "notebook_defaults"

    def _info(self):
        """Return the DatasetInfo (schema is identical for every config)."""
        return datasets.DatasetInfo(
            description="Garbage datasets for LLM training",
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "header": datasets.Value("string"),
                    "story": datasets.Value("string"),
                }
            ),
            homepage="https://main.horse",
        )

    def _split_generators(self, dl_manager):
        """Return a single 'everything' split backed by the config's CSV.

        Args:
            dl_manager: download manager supplied by the library; unused
                because the CSV ships alongside this script.
        """
        return [
            datasets.SplitGenerator(
                name='everything',
                gen_kwargs={"filepath": self.config.filename},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs read from the CSV at *filepath*.

        The row's 'id' column doubles as the example key; rows are assumed
        to carry at least the 'id', 'header' and 'story' columns declared
        in _info (extra columns would fail feature encoding — TODO confirm
        the CSVs contain exactly these three).
        """
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            for row in DictReader(f):
                yield row['id'], row
|
|