Set Mixtral API seed
Browse files
metric.py
CHANGED
@@ -60,7 +60,7 @@ def heading(text: str) -> str:
|
|
60 |
|
61 |
def chat_llm_batch(model_id, prompts, limit= 20):
|
62 |
parameters = TextGenerationParameters(
|
63 |
-
decoding_method=DecodingMethod.SAMPLE, max_new_tokens=128, min_new_tokens=30, temperature=0, top_k=50, top_p=1
|
64 |
)
|
65 |
client = Client(credentials=Credentials.from_env())
|
66 |
response_list = []
|
@@ -383,8 +383,8 @@ def mmmu_eval(submission_dict_of_dict, solution_dict_of_dict, category=None, cat
|
|
383 |
answer = str(output['pred'])
|
384 |
# label = str(solution_dict_of_dict.iloc[row]['pred'])
|
385 |
label = str(solution_dict_of_dict[output_id]['pred'])
|
386 |
-
print('answer:', answer)
|
387 |
-
print('label:', label)
|
388 |
|
389 |
# this is applied on samples with question type of "short-answer"
|
390 |
# currently all the samples have the question type of "short-answer"
|
|
|
60 |
|
61 |
def chat_llm_batch(model_id, prompts, limit= 20):
|
62 |
parameters = TextGenerationParameters(
|
63 |
+
decoding_method=DecodingMethod.SAMPLE, max_new_tokens=128, min_new_tokens=30, temperature=0, top_k=50, top_p=1, random_seed=42
|
64 |
)
|
65 |
client = Client(credentials=Credentials.from_env())
|
66 |
response_list = []
|
|
|
383 |
answer = str(output['pred'])
|
384 |
# label = str(solution_dict_of_dict.iloc[row]['pred'])
|
385 |
label = str(solution_dict_of_dict[output_id]['pred'])
|
386 |
+
# print('answer:', answer)
|
387 |
+
# print('label:', label)
|
388 |
|
389 |
# this is applied on samples with question type of "short-answer"
|
390 |
# currently all the samples have the question type of "short-answer"
|