MMFMChallenge / metric.py
from openai import OpenAI
import pandas as pd
from huggingface_hub import hf_hub_download

# The OpenAI client reads the API key from the OPENAI_API_KEY environment
# variable; do not hard-code the key in the metric script.
client = OpenAI()
def compute(params):
    public_score = 0
    private_score = 0

    # Download the ground-truth solution file from the competition dataset repo.
    solution_file = hf_hub_download(
        repo_id=params.competition_id,
        filename="solution.csv",
        token=params.token,
        repo_type="dataset",
    )
    solution_df = pd.read_csv(solution_file)
    # Download the team's submission for this run.
    submission_filename = f"submissions/{params.team_id}-{params.submission_id}.csv"
    submission_file = hf_hub_download(
        repo_id=params.competition_id,
        filename=submission_filename,
        token=params.token,
        repo_type="dataset",
    )
    submission_df = pd.read_csv(submission_file)

    # Compare the first predicted caption against the first ground-truth caption.
    submitted_answer = str(submission_df.iloc[0]["pred"])
    gt = str(solution_df.iloc[0]["pred"])
    prompt = (
        "Give me a score from 1 to 10 (higher is better) judging how similar "
        f"these two captions are. Caption one: {submitted_answer}. "
        f"Caption two: {gt}\nScore:"
    )
    # Ask the LLM judge for a similarity score; fall back to 0 on any API error.
    try:
        response = client.completions.create(
            model="gpt-3.5-turbo-instruct",
            prompt=prompt,
            temperature=0,
            max_tokens=1,
        )
        public_score = int(response.choices[0].text.strip())
    except Exception as e:
        print(f"Error calling the OpenAI API: {e}")
    private_score = public_score
    metric_dict = {
        "public_score": {"metric1": public_score},
        "private_score": {"metric1": private_score},
    }
    return metric_dict
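

# --- Local smoke test (not part of the competition harness) ---
# A minimal sketch of how `compute` can be exercised locally, assuming a
# params object that exposes the attributes used above (competition_id,
# token, team_id, submission_id). The repo id, token, team id and
# submission id below are placeholders, not real values.
if __name__ == "__main__":
    from types import SimpleNamespace

    params = SimpleNamespace(
        competition_id="my-org/my-competition",  # hypothetical dataset repo
        token="hf_xxx",                          # placeholder HF token
        team_id="team-0000",                     # placeholder team id
        submission_id="0000",                    # placeholder submission id
    )
    print(compute(params))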