import os

from openai import OpenAI

if "OPENAI" in os.environ:
    print('Loaded OPENAI key.')
else:
    print('Cannot load the OPENAI key.')

client = OpenAI(api_key=os.environ['OPENAI'])

import pandas as pd
from huggingface_hub import hf_hub_download


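# Score one submission against the competition's solution file, producing
# separate scores for the public and private leaderboard splits.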
def compute(params):
    public_score = 0
    private_score = 0

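    # Download the ground-truth solution file from the competition dataset repo.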
    solution_file = hf_hub_download(
        repo_id=params.competition_id,
        filename="solution.csv",
        token=params.token,
        repo_type="dataset",
    )
    solution_df = pd.read_csv(solution_file)

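    # Download this team's submission file from the same dataset repo.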
    submission_filename = f"submissions/{params.team_id}-{params.submission_id}.csv"
    submission_file = hf_hub_download(
        repo_id=params.competition_id,
        filename=submission_filename,
        token=params.token,
        repo_type="dataset",
    )
    submission_df = pd.read_csv(submission_file)

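    # Split the solution rows into public and private leaderboard subsets.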
    public_ids = solution_df[solution_df.split == "public"][params.submission_id_col].values
    private_ids = solution_df[solution_df.split == "private"][params.submission_id_col].values

    public_solution_df = solution_df[solution_df[params.submission_id_col].isin(public_ids)]
    public_submission_df = submission_df[submission_df[params.submission_id_col].isin(public_ids)]

    private_solution_df = solution_df[solution_df[params.submission_id_col].isin(private_ids)]
    private_submission_df = submission_df[submission_df[params.submission_id_col].isin(private_ids)]

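    # Sort both frames by the id column so rows line up for row-wise comparison.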
    public_solution_df = public_solution_df.sort_values(params.submission_id_col).reset_index(drop=True)
    public_submission_df = public_submission_df.sort_values(params.submission_id_col).reset_index(drop=True)

    private_solution_df = private_solution_df.sort_values(params.submission_id_col).reset_index(drop=True)
    private_submission_df = private_submission_df.sort_values(params.submission_id_col).reset_index(drop=True)

    print('public_solution_df', public_solution_df)
    print('private_solution_df', private_solution_df)

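    # LLM-as-a-judge metric: ask gpt-3.5-turbo-instruct to rate how similar each
    # predicted caption is to its reference caption on a 1-10 scale.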
    def _metric(outputs, targets):
        scores = []

        for row, output in outputs.iterrows():
            print('outputs type', type(outputs), 'targets type', type(targets))

            answer = str(output['pred'])
            label = str(targets.iloc[row]['pred'])

            print('answer:', answer)
            print('label:', label)

            prompt = f"Give me a score from 1 to 10 (higher is better) judging how similar these two captions are. Caption one: {answer}. Caption two: {label}\nScore: "

            response = client.completions.create(
                model="gpt-3.5-turbo-instruct",
                prompt=prompt,
                temperature=0,
                max_tokens=2,
            )
            eval_result = response.choices[0].text.strip()
            print(response)
            print('eval_result', eval_result)
            scores.append(int(eval_result))

        # Average the per-row judge scores into a single value for this split.
        return sum(scores) / len(scores)

    target_cols = [col for col in solution_df.columns if col not in [params.submission_id_col, "split"]]
    public_score = _metric(public_solution_df[target_cols], public_submission_df[target_cols])
    private_score = _metric(private_solution_df[target_cols], private_submission_df[target_cols])

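    # Package the public and private scores under a single metric name.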
    metric_name = "metric1"
    metric_dict = {
        "public_score": {metric_name: public_score},
        "private_score": {metric_name: private_score},
    }

    return metric_dict