Datasets:
  Tasks: Text Generation
  Sub-tasks: language-modeling
  Languages: Chinese
  Size: 10K<n<100K
  ArXiv: (none listed)
  Tags: question-generation
  License: (none listed)
import json
import os
from random import seed, shuffle
def get_dict(filepath):
    """Load a SQuAD-format JSON file and flatten it into QA records.

    Parameters
    ----------
    filepath : str
        Path to a SQuAD-style JSON file whose top-level object has a
        ``"data"`` key (list of articles, each with ``"paragraphs"``).

    Returns
    -------
    list[dict]
        One dict per usable question, with keys ``"answer"``,
        ``"context"`` and ``"question"``. Skipped are: questions
        flagged ``is_impossible``, questions with no answers, and
        answers whose text does not occur verbatim in the context.
    """
    output = []
    # Explicit utf-8: the corpus is Chinese, and the platform default
    # encoding (e.g. cp1252 on Windows) would fail to decode it.
    with open(filepath, encoding="utf-8") as f:
        articles = json.load(f)["data"]
    for article in articles:
        for paragraph in article["paragraphs"]:
            context = paragraph["context"]
            for qa in paragraph["qas"]:
                # .get(): SQuAD v1-style files have no "is_impossible"
                # key at all; treat a missing key as answerable.
                if qa.get("is_impossible", False):
                    continue
                answers = qa["answers"]
                if not answers:
                    continue
                answer = answers[0]["text"]
                # Keep only pairs whose answer appears verbatim in the
                # context, so downstream span lookup cannot fail.
                if answer not in context:
                    continue
                output.append({
                    "answer": answer,
                    "context": context,
                    "question": qa["question"],
                })
    return output
# ---- Build train/valid/test JSONL splits from the SQuAD-zen dumps. ----
train = get_dict("dataset/train-zen-v1.0.json")
dev = get_dict("dataset/dev-zen-v1.0.json")

# Fixed seed so the carved-out test split is reproducible across runs.
seed(42)
shuffle(train)

# Take a test set from the shuffled training data, sized to match dev.
test = train[:len(dev)]
train = train[len(dev):]

# `os` was imported but never used; the script would crash if data/
# did not already exist — create it explicitly.
os.makedirs("data", exist_ok=True)


def _write_jsonl(path, records):
    """Write *records* to *path* as one JSON object per line (UTF-8)."""
    with open(path, "w", encoding="utf-8") as f:
        # ensure_ascii=False keeps the Chinese text human-readable
        # instead of escaping every character to \uXXXX.
        f.write("\n".join(json.dumps(x, ensure_ascii=False) for x in records))


_write_jsonl("data/raw.train.jsonl", train)
_write_jsonl("data/raw.valid.jsonl", dev)
_write_jsonl("data/raw.test.jsonl", test)