from prompt import Prompt
from openai import OpenAI
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
import gradio as gr
import pandas as pd
import os, json
import time
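# QUESTION_DICT maps the item keys used in the prompt output / offline result csvs to
# human-readable question labels; REVERSE_QUESTION_DICT maps those labels back to the
# question ids shown in the UI (the ids shift after Question 3 because the two
# intervention items become separate questions; see phase_df below).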
QUESTION_DICT = {
    "Question 1": "Animal Type",
    "Question 2": "Exposure Age",
    "Question 3": "Behavior Test",
    "intervention_1": "Intervention 1",
    "intervention_2": "Intervention 2",
    "Question 5": "Genetic Chain",
    "Question 6": "Issues or Challenge Resolved",
    "Question 7": "Innovations in Methodology",
    "Question 8": "Impact of Findings",
    "Question 9": "limitations",
    "Question 10": "Potential Applications",
}
REVERSE_QUESTION_DICT = {
    "Animal Type": "Question 1",
    "Exposure Age": "Question 2",
    "Behavior Test": "Question 3",
    "Intervention 1": "Question 4",
    "Intervention 2": "Question 5",
    "Genetic Chain": "Question 6",
    "Issues or Challenge Resolved": "Question 7",
    "Innovations in Methodology": "Question 8",
    "Impact of Findings": "Question 9",
    "limitations": "Question 10",
    "Potential Applications": "Question 11",
}
class Backend:
    def __init__(self):
        self.agent = OpenAI()
        self.prompt = Prompt()
    def read_file_single(self, file):
        # read the file
        if file is not None:
            with open(file.name, 'r') as f:
                text = f.read()
        else:
            raise gr.Error("You need to upload a file first")
        return text
    def phrase_pdf(self, file_path):
        # import lazily so the langchain / unstructured dependency is only loaded when a PDF is parsed
        from langchain.document_loaders import UnstructuredPDFLoader
        loader = UnstructuredPDFLoader(file_path, mode='elements')
        file = loader.load()
        # in 'elements' mode each element is a separate document, so join them back into one text
        return '\n'.join(doc.page_content for doc in file)
    def read_file(self, files):
        # read each uploaded file
        text_list = []
        self.filename_list = []
        if files is not None:
            for file in files:
                if file.name.split('.')[-1] == 'pdf':
                    # convert pdf to txt
                    text = self.phrase_pdf(file.name)
                else:
                    with open(file.name, 'r', encoding='utf-8') as f:
                        text = f.read()
                text_list.append(text)
                # keep only the bare file name (handles both POSIX and Windows style paths)
                self.filename_list.append(os.path.basename(file.name))
        else:
            raise gr.Error("You need to upload a file first")
        return text_list
    def highlight_text(self, text, highlight_list):
        # Find the original sentences
        # Split the passage into sentences
        # sentences_in_passage = text.replace('\n', '')
        sentences_in_passage = text.split('.')
        sentences_in_passage = [i.split('\n') for i in sentences_in_passage]
        new_sentences_in_passage = []
        for i in sentences_in_passage:
            new_sentences_in_passage = new_sentences_in_passage + i
        new_sentences_in_passage = [i for i in new_sentences_in_passage if len(i) > 10]
        # highlight the references
        for hl in highlight_list:
            # Find the best match using fuzzy matching
            best_match = process.extractOne(hl, new_sentences_in_passage, scorer=fuzz.partial_ratio)
            text = text.replace(best_match[0], f'<mark style="background: #A5D2F1">{best_match[0]}</mark><mark style="background: #FFC0CB"><font color="red"> (match score:{best_match[1]})</font></mark>')
        # add line breaks
        text = text.replace('\n', ' <br /> ')
        # add a scroll bar
        text = f'<div style="height: 600px; overflow: auto;">{text}</div>'
        return text
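    # Illustrative sketch of the matching step above (hypothetical sentences, not from a real paper):
    #   process.extractOne("exposed to sevoflurane at postnatal day 7",
    #                      ["Mice were exposed to sevoflurane at postnatal day 7.", ...],
    #                      scorer=fuzz.partial_ratio)
    # returns a (best-matching sentence, similarity score from 0 to 100) tuple, which is what
    # gets wrapped in the <mark> tags above.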
    def process_file_online(self, file, questions, openai_key, model_selection, progress = gr.Progress()):
        # record the questions
        self.questions = questions
        # get the text_list
        self.text_list = self.read_file(file)
        # make the prompts
        prompt_list = [self.prompt.get(text, questions, 'v3') for text in self.text_list]
        # select the model
        if model_selection == 'ChatGPT':
            model = 'gpt-3.5-turbo-16k'
        elif model_selection == 'GPT4':
            model = 'gpt-4-1106-preview'
        # interact with openai
        self.res_list = []
        for prompt in progress.tqdm(prompt_list, desc = 'Generating answers...'):
            res = self.agent(prompt, with_history = False, temperature = 0.1, model = model, api_key = openai_key)
            res = self.prompt.process_result(res, 'v3')
            self.res_list.append(res)
        # Use the first file as default
        # Use the first question for multiple questions
        gpt_res = self.res_list[0]
        self.gpt_result = gpt_res
        self.current_question = 0
        self.totel_question = len(gpt_res.keys())
        self.current_passage = 0
        self.total_passages = len(self.res_list)
        # make a dataframe to record everything
        self.ori_answer_df = pd.DataFrame()
        self.answer_df = pd.DataFrame()
        for i, res in enumerate(self.res_list):
            tmp = pd.DataFrame(res).T
            tmp = tmp.reset_index()
            tmp = tmp.rename(columns={"index": "question_id"})
            tmp['filename'] = self.filename_list[i]
            tmp['question'] = self.questions
            self.ori_answer_df = pd.concat([tmp, self.ori_answer_df])
            self.answer_df = pd.concat([tmp, self.answer_df])
        # default to the first question of the first file
        res = gpt_res['Question 1']
        question = self.questions[self.current_question]
        self.answer = res['answer']
        self.text = self.text_list[0]
        self.highlighted_out = res['original sentences']
        highlighted_out_html = self.highlight_text(self.text, self.highlighted_out)
        self.highlighted_out = '\n'.join(self.highlighted_out)
        file_name = self.filename_list[self.current_passage]
        return file_name, question, self.answer, self.highlighted_out, highlighted_out_html, self.answer, self.highlighted_out
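    # answer_df / ori_answer_df end up with one row per (file, question), holding the model
    # answer, its supporting sentences, the filename and the question text; process_results
    # later fills in the reviewer columns (answer_correct, reference_correct, correct_answer,
    # correct_reference) on answer_df.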
    def process_results(self, answer_correct, correct_answer, reference_correct, correct_reference):
        if not hasattr(self, 'clicked_correct_answer'):
            raise gr.Error("You need to judge whether the generated answer is correct first")
        if not hasattr(self, 'clicked_correct_reference'):
            raise gr.Error("You need to judge whether the highlighted reference is correct first")
        if not hasattr(self, 'answer_df'):
            raise gr.Error("You need to submit the document first")
        if self.current_question >= self.totel_question or self.current_question < 0:
            raise gr.Error("No more questions, please go back")
        # record the answer
        condition = (self.answer_df['question_id'] == f'Question {self.current_question + 1}') & \
                    (self.answer_df['filename'] == self.filename_list[self.current_passage])
        self.answer_df.loc[condition, 'answer_correct'] = answer_correct
        self.answer_df.loc[condition, 'reference_correct'] = reference_correct
        # self.answer_df.loc[f'Question {self.current_question + 1}', 'answer_correct'] = answer_correct
        # self.answer_df.loc[f'Question {self.current_question + 1}', 'reference_correct'] = reference_correct
        if self.clicked_correct_answer:
            if hasattr(self, 'answer'):
                self.answer_df.loc[condition, 'correct_answer'] = self.answer
            else:
                raise gr.Error("You need to submit the document first")
        else:
            # self.answer_df.loc[f'Question {self.current_question + 1}', 'correct_answer'] = correct_answer
            self.answer_df.loc[condition, 'correct_answer'] = correct_answer
        if self.clicked_correct_reference:
            if hasattr(self, 'highlighted_out'):
                self.answer_df.loc[condition, 'correct_reference'] = self.highlighted_out
            else:
                raise gr.Error("You need to submit the document first")
        else:
            self.answer_df.loc[condition, 'correct_reference'] = correct_reference
        gr.Info('Results saved!')
        return "Results saved!"
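    # Navigation below: process_next / process_last step through questions and the
    # switch_*_passage methods step through files. The clicked_correct_* flags are set by
    # change_correct_answer / change_correct_reference and deleted on every move, forcing the
    # reviewer to judge each new answer before it can be saved with process_results.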
    def process_next(self):
        self.current_question += 1
        # force the reviewer to judge the new answer before saving
        if hasattr(self, 'clicked_correct_answer'):
            del self.clicked_correct_answer
        if hasattr(self, 'clicked_correct_reference'):
            del self.clicked_correct_reference
        if self.current_question >= self.totel_question:
            # self.current_question -= 1
            return "No more questions!", "No more questions!", "No more questions!", "No more questions!", "No more questions!", 'No more questions!', 'No more questions!', 'Still need to click the button above to save the results', None, None
        else:
            # res = self.gpt_result[f'Question {self.current_question + 1}']
            res = self.gpt_result[list(self.gpt_result.keys())[self.current_question]]
            question = self.questions[self.current_question]
            self.answer = res['answer']
            self.highlighted_out = res['original sentences']
            highlighted_out_html = self.highlight_text(self.text, self.highlighted_out)
            self.highlighted_out = '\n'.join(self.highlighted_out)
            file_name = self.filename_list[self.current_passage]
            return file_name, question, self.answer, self.highlighted_out, highlighted_out_html, 'Please judge the generated answer', 'Please judge the generated answer', 'Still need to click the button above to save the results', None, None
    def process_last(self):
        self.current_question -= 1
        # force the reviewer to judge the new answer before saving
        if hasattr(self, 'clicked_correct_answer'):
            del self.clicked_correct_answer
        if hasattr(self, 'clicked_correct_reference'):
            del self.clicked_correct_reference
        # check question boundary
        if self.current_question < 0:
            # self.current_question += 1
            return "No more questions!", "No more questions!", "No more questions!", "No more questions!", "No more questions!", 'No more questions!', 'No more questions!', 'Still need to click the button above to save the results', None, None
        else:
            # res = self.gpt_result[f'Question {self.current_question + 1}']
            res = self.gpt_result[list(self.gpt_result.keys())[self.current_question]]
            question = self.questions[self.current_question]
            self.answer = res['answer']
            self.highlighted_out = res['original sentences']
            highlighted_out_html = self.highlight_text(self.text, self.highlighted_out)
            self.highlighted_out = '\n'.join(self.highlighted_out)
            file_name = self.filename_list[self.current_passage]
            return file_name, question, self.answer, self.highlighted_out, highlighted_out_html, 'Please judge the generated answer', 'Please judge the generated answer', 'Still need to click the button above to save the results', None, None
    def switch_next_passage(self):
        self.current_question = 0
        # force the reviewer to judge the new answer before saving
        if hasattr(self, 'clicked_correct_answer'):
            del self.clicked_correct_answer
        if hasattr(self, 'clicked_correct_reference'):
            del self.clicked_correct_reference
        self.current_passage += 1
        if self.current_passage >= self.total_passages:
            # self.current_passage -= 1
            return "No more passages!", "No more passages!", "No more passages!", "No more passages!", "No more passages!", 'No more passages!', 'No more passages!', 'Still need to click the button above to save the results', None, None
        else:
            self.text = self.text_list[self.current_passage]
            gpt_res = self.res_list[self.current_passage]
            self.gpt_result = gpt_res
            # res = self.gpt_result[f'Question {self.current_question + 1}']
            res = self.gpt_result[list(self.gpt_result.keys())[self.current_question]]
            question = self.questions[self.current_question]
            self.answer = res['answer']
            self.highlighted_out = res['original sentences']
            highlighted_out_html = self.highlight_text(self.text, self.highlighted_out)
            self.highlighted_out = '\n'.join(self.highlighted_out)
            file_name = self.filename_list[self.current_passage]
            return file_name, question, self.answer, self.highlighted_out, highlighted_out_html, 'Please judge the generated answer', 'Please judge the generated answer', 'Still need to click the button above to save the results', None, None
    def switch_last_passage(self):
        self.current_question = 0
        # force the reviewer to judge the new answer before saving
        if hasattr(self, 'clicked_correct_answer'):
            del self.clicked_correct_answer
        if hasattr(self, 'clicked_correct_reference'):
            del self.clicked_correct_reference
        self.current_passage -= 1
        if self.current_passage < 0:
            # self.current_passage += 1
            return "No more passages!", "No more passages!", "No more passages!", "No more passages!", "No more passages!", 'No more passages!', 'No more passages!', 'Still need to click the button above to save the results', None, None
        else:
            self.text = self.text_list[self.current_passage]
            gpt_res = self.res_list[self.current_passage]
            self.gpt_result = gpt_res
            # res = self.gpt_result[f'Question {self.current_question + 1}']
            res = self.gpt_result[list(self.gpt_result.keys())[self.current_question]]
            question = self.questions[self.current_question]
            self.answer = res['answer']
            self.highlighted_out = res['original sentences']
            highlighted_out_html = self.highlight_text(self.text, self.highlighted_out)
            self.highlighted_out = '\n'.join(self.highlighted_out)
            file_name = self.filename_list[self.current_passage]
            return file_name, question, self.answer, self.highlighted_out, highlighted_out_html, 'Please judge the generated answer', 'Please judge the generated answer', 'Still need to click the button above to save the results', None, None
    def download_answer(self, path = './tmp', name = 'answer.xlsx'):
        path = os.path.join(path, str(time.time()))
        os.makedirs(path, exist_ok = True)
        path = os.path.join(path, name)
        # self.ori_answer_df['questions'] = self.questions
        if not hasattr(self, 'ori_answer_df'):
            raise gr.Error("You need to submit the document first")
        else:
            self.ori_answer_df.to_excel(path, index = False)
        return path
    def download_corrected(self, path = './tmp', name = 'corrected_answer.xlsx'):
        path = os.path.join(path, str(time.time()))
        os.makedirs(path, exist_ok = True)
        path = os.path.join(path, name)
        # self.answer_df['questions'] = self.questions
        if not hasattr(self, 'answer_df'):
            raise gr.Error("You need to submit the document first")
        else:
            self.answer_df.to_excel(path, index = False)
        return path
    def change_correct_answer(self, correctness):
        if correctness == "Correct":
            self.clicked_correct_answer = True
            return "No need to change"
        else:
            if hasattr(self, 'answer'):
                self.clicked_correct_answer = False
                return self.answer
            else:
                return "No answer yet, you need to submit the document first"
    def change_correct_reference(self, correctness):
        if correctness == "Correct":
            self.clicked_correct_reference = True
            return "No need to change"
        else:
            if hasattr(self, 'highlighted_out'):
                self.clicked_correct_reference = False
                return self.highlighted_out
            else:
                return "No reference yet, you need to submit the document first"
    def phase_df(self, df, questions):
        # Each tuple: (answer column in the result csv, question label, output question id,
        # column holding the supporting sentences). The column names match those used in the
        # offline result csvs, including the 'Question 4intervention_*' spellings.
        question_columns = [
            ('Question 1', 'Animal Type', 'Question 1', 'Question 1_original_sentences'),
            ('Question 2', 'Exposure Age', 'Question 2', 'Question 2_original_sentences'),
            ('Question 3', 'Behavior Test', 'Question 3', 'Question 3_original_sentences'),
            ('intervention_1', 'Intervention 1', 'Question 4', 'Question 4intervention_1_original_sentences'),
            ('intervention_2', 'Intervention 2', 'Question 5', 'Question 4intervention_2_original_sentences'),
            ('Question 5', 'Genetic Chain', 'Question 6', 'Question 5_original_sentences'),
            ('Question 6', 'Issues or Challenge Resolved', 'Question 7', 'Question 6_original_sentences'),
            ('Question 7', 'Innovations in Methodology', 'Question 8', 'Question 7_original_sentences'),
            ('Question 8', 'Impact of Findings', 'Question 9', 'Question 8_original_sentences'),
            ('Question 9', 'limitations', 'Question 10', 'Question 9_original_sentences'),
            ('Question 10', 'Potential Applications', 'Question 11', 'Question 10_original_sentences'),
        ]
        df = json.loads(df.T.to_json())
        res_list = []
        for key, item in df.items():
            tmp_res_list = {}
            for answer_col, label, question_id, sentences_col in question_columns:
                if answer_col in item and label in questions:
                    tmp_res_list[question_id] = {
                        'answer': item[answer_col],
                        'original sentences': eval(item[sentences_col]),
                    }
            res_list.append(tmp_res_list)
            # checking: every selected question must have an answer in the csv
            for i in questions:
                if REVERSE_QUESTION_DICT[i] not in tmp_res_list:
                    raise gr.Error(f"Question {i} is not in the answer list, Please don't select it!")
        return res_list
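    # Note on the csv format assumed above: each row is one paper (identified by the 'fn'
    # column), the answer columns hold plain text, and every *_original_sentences column holds
    # the string representation of a Python list of sentences, hence the eval() when loading.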
    def process_file_offline(self, questions, answer_type, progress = gr.Progress()):
        # record the questions
        # self.questions = questions
        self.questions = [f"[ Question {i + 1}/{len(questions)} ] {q}" for i, q in enumerate(questions)]
        # load the pre-computed answers
        if answer_type == 'ChatGPT_txt':
            df = pd.read_csv('./offline_results/results_all.csv')
        elif answer_type == 'GPT4_txt':
            df = pd.read_csv('./offline_results/results_all_gpt4.csv')
        elif answer_type == 'New_GPT_4_pdf':
            df = pd.read_csv('./offline_results/results_new_pdf.csv')
        elif answer_type == 'Exp_training':
            df = pd.read_csv('./offline_results/exp_test.csv')
        elif answer_type == 'Exp_Group_A':
            df = pd.read_csv('./offline_results/exp_ga.csv')
        elif answer_type == 'Exp_Group_B':
            df = pd.read_csv('./offline_results/exp_gb.csv')
        # parse the dataframe into per-passage result dicts
        self.res_list = self.phase_df(df, questions)
        if answer_type in ['ChatGPT_txt', 'GPT4_txt', 'New_GPT_4_pdf']:
            if answer_type in ['ChatGPT_txt', 'GPT4_txt']:
                txt_root_path = './20230808-AI coding-1st round'
                self.filename_list = df['fn'].tolist()
            elif answer_type == 'New_GPT_4_pdf':
                txt_root_path = './new_pdfs'
                self.filename_list = df['fn'].tolist()
                self.filename_list = ['.'.join(f.split('.')[:-1]) + '.txt' for f in self.filename_list]
            self.text_list = []
            for file in progress.tqdm(self.filename_list):
                if file.split('.')[-1] == 'pdf':
                    # convert pdf to txt
                    text = self.phrase_pdf(os.path.join(txt_root_path, file))
                else:
                    text_path = os.path.join(txt_root_path, file)
                    with open(text_path, 'r', encoding='utf-8') as f:
                        text = f.read()
                self.text_list.append(text)
        elif answer_type in ['Exp_training', 'Exp_Group_A', 'Exp_Group_B']:
            self.filename_list = df['fn'].tolist()
            if "Passage" not in self.filename_list[0]:
                self.filename_list = [f"[ Passage {i + 1}/{len(self.filename_list)} ] {self.filename_list[i]}" for i in range(len(self.filename_list))]
            self.text_list = df['content'].tolist()
        # Use the first file as default
        # Use the first question for multiple questions
        gpt_res = self.res_list[0]
        self.gpt_result = gpt_res
        self.current_question = 0
        self.totel_question = len(self.res_list[0].keys())
        self.current_passage = 0
        self.total_passages = len(self.res_list)
        # make a dataframe to record everything
        self.ori_answer_df = pd.DataFrame()
        self.answer_df = pd.DataFrame()
        for i, res in enumerate(self.res_list):
            tmp = pd.DataFrame(res).T
            tmp = tmp.reset_index()
            tmp = tmp.rename(columns={"index": "question_id"})
            tmp['filename'] = self.filename_list[i]
            tmp['question'] = self.questions
            self.ori_answer_df = pd.concat([tmp, self.ori_answer_df])
            self.answer_df = pd.concat([tmp, self.answer_df])
        # default to the first question of the first file
        gpt_res = gpt_res[list(gpt_res.keys())[0]]
        question = self.questions[self.current_question]
        self.answer = gpt_res['answer']
        self.text = self.text_list[0]
        self.highlighted_out = gpt_res['original sentences']
        highlighted_out_html = self.highlight_text(self.text, self.highlighted_out)
        self.highlighted_out = '\n'.join(self.highlighted_out)
        file_name = self.filename_list[self.current_passage]
        return file_name, question, self.answer, self.highlighted_out, highlighted_out_html, self.answer, self.highlighted_out
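

# Minimal, self-contained sketch of the fuzzy-highlighting idea used in Backend.highlight_text
# (illustrative sentences only; in the app the passage comes from an uploaded paper and the
# reference snippets come from the GPT output). Guarded so it never runs inside the Space.
if __name__ == '__main__':
    passage_sentences = [
        "Mice were exposed to sevoflurane at postnatal day 7.",
        "Spatial memory was assessed with the Morris water maze.",
    ]
    reference = "exposed to sevoflurane at postnatal day 7"
    best_match = process.extractOne(reference, passage_sentences, scorer=fuzz.partial_ratio)
    print(best_match)  # (best-matching sentence, similarity score between 0 and 100)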