Create utils.py
prompt_injection/evaluators/utils.py
ADDED
import os
import pickle
from typing import List

import pandas as pd
from tqdm import tqdm

from prompt_injection.evaluators.base import PromptEvaluator


def init_evaluator_result_object(output_path, evaluator_list):
    """Load a partially completed result dict from disk, or start a fresh one."""
    result = {'idx': [], 'Prompt': []}
    if os.path.exists(output_path):
        with open(output_path, 'rb') as f:
            result = pickle.load(f)

    # Ensure every evaluator has a column, including ones added after a resume.
    for evaluator in evaluator_list:
        result[evaluator.get_name()] = result.get(evaluator.get_name(), [])

    return result


def evaluate_all(prompts, evaluator_list: List[PromptEvaluator], output_path):
    result = init_evaluator_result_object(output_path, evaluator_list)

    for i in tqdm(range(len(prompts))):
        # Skip prompts already scored in a previous (interrupted) run.
        if i in result['idx']:
            continue

        prompt = prompts[i]
        result['idx'].append(i)
        result['Prompt'].append(prompt)
        for evaluator in evaluator_list:
            result[evaluator.get_name()].append(evaluator.eval_sample(prompt))

        # Checkpoint after every prompt so an interrupted run can resume.
        with open(output_path, 'wb') as f:
            pickle.dump(result, f, protocol=pickle.HIGHEST_PROTOCOL)

    return pd.DataFrame.from_dict(result)
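
For context, a minimal usage sketch. `LengthEvaluator` is hypothetical; the only interface assumed from the code above is that a `PromptEvaluator` exposes `get_name()` and `eval_sample(prompt)`:

# Hypothetical evaluator, for illustration only: the real evaluators live
# under prompt_injection/evaluators/ and subclass PromptEvaluator.
from prompt_injection.evaluators.base import PromptEvaluator
from prompt_injection.evaluators.utils import evaluate_all

class LengthEvaluator(PromptEvaluator):
    def get_name(self):
        return 'length'

    def eval_sample(self, prompt):
        return len(prompt)

prompts = ['Ignore previous instructions.', 'What is 2 + 2?']
df = evaluate_all(prompts, [LengthEvaluator()], 'results.pkl')
print(df)  # columns: idx, Prompt, length

Because the result dict is pickled after every prompt, re-running the same call after an interruption skips the indices already in `results.pkl` and picks up where it left off.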