eduagarcia committed
Commit
e7e3c60
1 Parent(s): b0d53b2

update script to remove fewshot split

data/fewshot-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5a9616244730b16c2206f28abda97ceb484d79381a33c474380d3f4369a25402
-size 19101

data/test-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d7ffb343ba1006a2d87ff746e7974eaa80848a3cb82b234defe1e24875db3d1f
-size 1019953

enem_challenge_disabled.py CHANGED
@@ -243,34 +243,24 @@ class ENEMChallenge(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         downloaded_file = dl_manager.download(FILE_URL)
-        fewshot = _get_train_examples()
+        explanation_questions = {d['id']:d['explanation'] for d in _get_train_examples()}
         dataset = {
             '2022': datasets.load_dataset(DATASET_PATH, name="2022", split="train"),
             '2023': datasets.load_dataset(DATASET_PATH, name="2023", split="train")
         }
 
-        #filter fewshot
-        to_filter = [int(d['id'].split('_')[1]) for d in fewshot]
-        dataset['2022'] = dataset['2022'].filter(lambda x: int(x['id'].split('_')[1]) not in to_filter)
-
         return [
             datasets.SplitGenerator(
-                name=datasets.Split.TEST,
+                name=datasets.Split.TRAIN,
                 gen_kwargs={
                     "data_list": [downloaded_file, dataset],
-                    "types": ["zip", "dataset"]
-                }
-            ),
-            datasets.SplitGenerator(
-                name='fewshot',
-                gen_kwargs={
-                    "data_list": [fewshot],
-                    "types": ["list"]
+                    "types": ["zip", "dataset"],
+                    "explanation_questions": explanation_questions
                 }
-            )
+            )
         ]
 
-    def _generate_examples(self, data_list, types):
+    def _generate_examples(self, data_list, types, explanation_questions):
         for data, type in zip(data_list, types):
             if type == "zip":
                 with zipfile.ZipFile(data, 'r') as zip_ref:
@@ -301,26 +291,8 @@ class ENEMChallenge(datasets.GeneratorBasedBuilder):
                         "question": document["context"] + '\n' + document["question"],
                         "choices": choices,
                         "answerKey": document["label"].upper(),
-                        "explanation": None,
+                        "explanation": explanation_questions.get(doc_id, None)
                     }
-            if type == "list":
-                for document in data:
-                    choices = {
-                        "text": document["options"],
-                        "label": ["A", "B", "C", "D", "E"]
-                    }
-                    doc_id = document['id']
-                    yield doc_id, {
-                        "id": doc_id,
-                        "question_number": int(doc_id.split('_')[1]),
-                        "exam_id": document['exam'],
-                        "exam_year": document['exam'].split('_')[0],
-                        "nullified": None,
-                        "question": document["context"] + '\n' + document["question"],
-                        "choices": choices,
-                        "answerKey": document["label"].upper(),
-                        "explanation": document["explanation"],
-                    }
             if type == "dataset":
                 for exam_id in data:
                     dataset = data[exam_id]
@@ -342,6 +314,6 @@ class ENEMChallenge(datasets.GeneratorBasedBuilder):
                         "question": document["question"],
                         "choices": choices,
                         "answerKey": document["label"].upper(),
-                        "explanation": None,
+                        "explanation": explanation_questions.get(doc_id, None)
                     }
 
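
Net effect of the diff above: the loader no longer exposes a separate fewshot split (and no longer filters those questions out of the 2022 data); everything is emitted as a single train split, and each question's explanation is attached by looking up its doc_id in the explanation_questions mapping built from _get_train_examples(). A minimal usage sketch follows, assuming a hypothetical local copy of the (re-enabled) script named enem_challenge.py and the split/field names shown in the diff:

# Minimal sketch (not part of the commit): how a consumer sees the dataset
# after this change. "enem_challenge.py" is a hypothetical local copy of the
# script above; split and field names follow the diff.
from datasets import load_dataset

ds = load_dataset("enem_challenge.py")  # newer `datasets` versions may also need trust_remote_code=True
print(ds)  # expected: only a "train" split; the old "fewshot" split no longer exists

example = ds["train"][0]
# "explanation" is now inlined per question: a string for the former few-shot
# items returned by _get_train_examples(), and None for every other question,
# mirroring explanation_questions.get(doc_id, None) in _generate_examples.
print(example["id"], example["explanation"])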