albertvillanova committed
Commit 524e209
1 Parent(s): a4b20f4

Remove ARC task

Files changed (1):
  1. src/constants.py (+2 -2)
src/constants.py CHANGED

```diff
@@ -14,7 +14,7 @@ RESULTS_DATASET_ID = "datasets/open-llm-leaderboard/results"
 DETAILS_DATASET_ID = "datasets/open-llm-leaderboard/{model_name_sanitized}-details"
 DETAILS_FILENAME = "samples_{subtask}_*.json"
 TASKS = {
-    "leaderboard_arc_challenge": ("ARC", "leaderboard_arc_challenge"),
+    # "leaderboard_arc_challenge": ("ARC", "leaderboard_arc_challenge"),
     "leaderboard_bbh": ("BBH", "leaderboard_bbh"),
     "leaderboard_gpqa": ("GPQA", "leaderboard_gpqa"),
     "leaderboard_ifeval": ("IFEval", "leaderboard_ifeval"),
@@ -23,7 +23,7 @@ TASKS = {
     "leaderboard_musr": ("MuSR", "leaderboard_musr"),
 }
 SUBTASKS = {
-    "leaderboard_arc_challenge": ["leaderboard_arc_challenge"],
+    # "leaderboard_arc_challenge": ["leaderboard_arc_challenge"],
     "leaderboard_bbh": [
         "leaderboard_bbh_boolean_expressions",
         "leaderboard_bbh_causal_judgement",
```