import os

import pandas as pd
from huggingface_hub import hf_hub_download
from openai import OpenAI

# Check that the OpenAI API key is available in the environment.
if "OPENAI" in os.environ:
    print('Loaded OPENAI Key.')
else:
    print('Cannot load the OPENAI key.')

client = OpenAI(api_key=os.environ['OPENAI'])

def compute(params):
    public_score = 0
    private_score = 0
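    # Download the ground-truth solution file from the competition dataset repo.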
    solution_file = hf_hub_download(
        repo_id=params.competition_id,
        filename="solution.csv",
        token=params.token,
        repo_type="dataset",
    )
    solution_df = pd.read_csv(solution_file)
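    # Download the team's submission file for this submission id.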
    submission_filename = f"submissions/{params.team_id}-{params.submission_id}.csv"
    submission_file = hf_hub_download(
        repo_id=params.competition_id,
        filename=submission_filename,
        token=params.token,
        repo_type="dataset",
    )
    submission_df = pd.read_csv(submission_file)
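    # Split the rows into the public and private leaderboard subsets.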
    public_ids = solution_df[solution_df.split == "public"][params.submission_id_col].values
    private_ids = solution_df[solution_df.split == "private"][params.submission_id_col].values

    public_solution_df = solution_df[solution_df[params.submission_id_col].isin(public_ids)]
    public_submission_df = submission_df[submission_df[params.submission_id_col].isin(public_ids)]
    private_solution_df = solution_df[solution_df[params.submission_id_col].isin(private_ids)]
    private_submission_df = submission_df[submission_df[params.submission_id_col].isin(private_ids)]
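    # Sort by id and reset the index so solution and submission rows line up row by row.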
    public_solution_df = public_solution_df.sort_values(params.submission_id_col).reset_index(drop=True)
    public_submission_df = public_submission_df.sort_values(params.submission_id_col).reset_index(drop=True)
    private_solution_df = private_solution_df.sort_values(params.submission_id_col).reset_index(drop=True)
    private_submission_df = private_submission_df.sort_values(params.submission_id_col).reset_index(drop=True)
    # # METRICS Calculation Evaluation
    # # _metric = SOME METRIC FUNCTION
    # def _metric(outputs, targets):
    #     # input example: public_solution_df[target_cols], public_submission_df[target_cols]
    #     score = 0.5
    #     return score
    print('public_solution_df', public_solution_df)
    print('private_solution_df', private_solution_df)
    ## LLM Scoring Evaluation
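    # LLM-as-judge scoring: for each row, ask the model to rate how similar the
    # submitted caption is to the reference caption on a 1-10 scale.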
    def _metric(outputs, targets):
        # inputs: public_solution_df[target_cols], public_submission_df[target_cols]
        # output: mean per-row similarity score (assumption: per-row scores are averaged)
        scores = []
        for row, output in outputs.iterrows():
            print('outputs type', type(outputs), 'targets type', type(targets))
            answer = str(output['pred'])
            label = str(targets.iloc[row]['pred'])
            print('answer:', answer)
            print('label:', label)
            prompt = f"Give me a score from 1 to 10 (higher is better) judging how similar these two captions are. Caption one: {answer}. Caption two: {label}\nScore:"
            try:
                response = client.completions.create(
                    model="gpt-3.5-turbo-instruct",
                    prompt=prompt,
                    temperature=0,
                    max_tokens=1,
                )
                eval_result = response.choices[0].text.strip()
                print('eval_result', eval_result)
                scores.append(int(eval_result))
            except Exception as err:
                print('Error calling the OpenAI API:', err)
                return None
        return sum(scores) / len(scores) if scores else None
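    # Score the public and private splits over every target column
    # (all columns except the id column and the split column).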
    target_cols = [col for col in solution_df.columns if col not in [params.submission_id_col, "split"]]
    public_score = _metric(public_solution_df[target_cols], public_submission_df[target_cols])
    private_score = _metric(private_solution_df[target_cols], private_submission_df[target_cols])

    metric_name = "metric1"
    metric_dict = {
        "public_score": {metric_name: public_score},
        "private_score": {metric_name: private_score},
    }
    return metric_dict
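
# Minimal local-test sketch (not part of the original script; the repo id, team/submission ids,
# id column name, and token source below are hypothetical placeholders). It builds a simple
# params object with the attributes compute() reads and prints the resulting metric dict.
if __name__ == "__main__":
    from types import SimpleNamespace

    params = SimpleNamespace(
        competition_id="your-org/your-competition-dataset",  # hypothetical dataset repo id
        team_id="team-id",
        submission_id="submission-id",
        submission_id_col="id",
        token=os.environ.get("HF_TOKEN"),  # assumes a Hugging Face token in the environment
    )
    print(compute(params))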