{
  "results": {
    "arc_pt": {
      "acc": 0.26666666666666666,
      "acc_stderr": 0.012933850109759568,
      "acc_norm": 0.30256410256410254,
      "acc_norm_stderr": 0.013435492568854205
    },
    "hellaswag_pt": {
      "acc": 0.37772239679271863,
      "acc_stderr": 0.005046899546439457,
      "acc_norm": 0.47014844511864773,
      "acc_norm_stderr": 0.00519566110400487
    },
    "truthfulqa_pt": {
      "mc1": 0.24111675126903553,
      "mc1_stderr": 0.015248032494426427,
      "mc2": 0.3911004265246538,
      "mc2_stderr": 0.014560723432804852
    }
  },
  "versions": {
    "arc_pt": 0,
    "hellaswag_pt": 1,
    "truthfulqa_pt": 1
  },
  "config": {
    "model": "hf-auto",
    "model_args": "pretrained=/lustre/mlnvme/data/asen_hpc-mula/checkpoints-llama/slurm_job_17049106/step_1960000",
    "batch_size": 1,
    "device": "cuda:0",
    "no_cache": false,
    "limit": null,
    "bootstrap_iters": 100000,
    "description_dict": {}
  }
}