Datasets:
Tasks:
Text Generation
Sub-tasks:
language-modeling
Languages:
Russian
Size:
10K<n<100K
ArXiv:
Tags:
question-generation
License:
update
Browse files
- generate_reference_files.py  +1 -1
- qg_ruquad.py  +3 -3
generate_reference_files.py
CHANGED
@@ -6,7 +6,7 @@ os.makedirs('./reference_files', exist_ok=True)
 for split in ['validation', 'test']:
-    dataset = load_dataset('asahi417/qg_ruquad', split=split)
+    dataset = load_dataset('asahi417/qg_ruquad', split=split, download_mode='force_redownload')
     for data in ['question', 'answer', 'sentence', 'paragraph']:
         with open('./reference_files/{}-{}.txt'.format(data, split), 'w') as f:
             if data == 'paragraph':
|
qg_ruquad.py
CHANGED
@@ -7,9 +7,9 @@ logger = datasets.logging.get_logger(__name__)
 _DESCRIPTION = """[SberSQuAD](https://huggingface.co/datasets/sberquad) dataset for question generation (QG) task."""
 _URL = 'https://huggingface.co/datasets/asahi417/qg_ruquad/raw/main/data/processed'
 _URLS = {
-    str(datasets.Split.TEST): f'{_URL}/test.jsonl',
-    str(datasets.Split.TRAIN): f'{_URL}/train.jsonl',
-    str(datasets.Split.VALIDATION): f'{_URL}/validation.jsonl'
+    str(datasets.Split.TEST): [f'{_URL}/test.jsonl'],
+    str(datasets.Split.TRAIN): [f'{_URL}/train.jsonl'],
+    str(datasets.Split.VALIDATION): [f'{_URL}/validation.jsonl']
 }