debug fix
metric.py CHANGED

@@ -28,7 +28,7 @@ from tqdm import tqdm
 # from eval_mixtral import LLM_eval, creat_prompt
 # from eval_mixtral import creat_prompt
 # from llm_evaluator import chat_llm
-
+import random
 from dotenv import load_dotenv
 
 from genai.client import Client
@@ -61,7 +61,7 @@ def heading(text: str) -> str:
 def chat_llm_batch(model_id, prompts, limit= 20):
     parameters = TextGenerationParameters(
         # decoding_method=DecodingMethod.SAMPLE, max_new_tokens=128, min_new_tokens=30, temperature=0, top_k=50, top_p=1, random_seed=42
-        decoding_method=DecodingMethod.GREEDY, max_new_tokens=128, min_new_tokens=30, temperature=0, top_k=
+        decoding_method=DecodingMethod.GREEDY, max_new_tokens=128, min_new_tokens=30, temperature=0, top_k=50, top_p=0, random_seed=42,
     )
     client = Client(credentials=Credentials.from_env())
     response_list = []
@@ -406,6 +406,7 @@ def mmmu_eval(submission_dict_of_dict, solution_dict_of_dict, category=None, cat
 
 
 def compute(params):
+    random.seed(42)
     # Download the two json file
     # category_
     category_to_sample_ids_dict_file = hf_hub_download(
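Taken together, the three hunks make the evaluation deterministic: `import random` plus `random.seed(42)` pin Python's global RNG at the top of `compute`, and the completed `TextGenerationParameters` line (previously truncated at `top_k=`, a syntax error) switches to greedy decoding with a fixed `random_seed`. A minimal stdlib-only sketch of what the seed buys; `pick_eval_subset`, `sample_ids`, and `k` are hypothetical stand-ins for whatever `compute` actually randomizes:

import random

def pick_eval_subset(sample_ids, k=100):
    # Hypothetical helper: with random.seed(42) called once per run,
    # this subsample is identical across runs, so metric numbers
    # stay comparable between evaluations.
    return random.sample(sorted(sample_ids), k)

random.seed(42)
first = pick_eval_subset(range(1000))
random.seed(42)
second = pick_eval_subset(range(1000))
assert first == second  # same seed, same subset

Note that greedy decoding is deterministic by construction, so the `temperature=0` and `random_seed=42` arguments in the fixed parameter line are redundant safeguards rather than the source of determinism.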