seawolf2357 committed
Commit 3a8fae5 • 1 Parent(s): abb9e73
Update app.py
app.py CHANGED
@@ -1,224 +1,2 @@
-import torch
-import transformers
-import gradio as gr
-from ragatouille import RAGPretrainedModel
-from huggingface_hub import InferenceClient
-import re
-from datetime import datetime
-import json
 import os
-
-import arxiv
-from utils import get_md_text_abstract, search_cleaner, get_arxiv_live_search
-
-retrieve_results = 20
-show_examples = True
-llm_models_to_choose = ['mistralai/Mixtral-8x7B-Instruct-v0.1', 'None']
-
-token=os.getenv("HF_TOKEN")
-
-generate_kwargs = dict(
-    temperature = None,
-    max_new_tokens = 2048,
-    top_p = None,
-    do_sample = False,
-)
-
-## RAG Model
-RAG = RAGPretrainedModel.from_index("colbert/indexes/arxiv_colbert")#
-# Refers to the file location path on HF: colbert/indexes/arxiv_colbert
-
-try:
-    gr.Info("Setting up retriever, please wait...")
-    rag_initial_output = RAG.search("What is Generative AI in Healthcare?", k = 1)
-    gr.Info("Retriever working successfully!")
-
-except:
-    gr.Warning("Retriever not working!")
-
-## Header
-mark_text = '# 🔍 Search Results\n'
-header_text = "## ArXiv Paper Summarization / Analysis Conversational AI 💻 ArXivGPT \n"
-
-try:
-    with open("README.md", "r") as f:
-        mdfile = f.read()
-    date_pattern = r'Index Last Updated : \d{4}-\d{2}-\d{2}'
-    match = re.search(date_pattern, mdfile)
-    date = match.group().split(': ')[1]
-    formatted_date = datetime.strptime(date, '%Y-%m-%d').strftime('%d %b %Y')
-    header_text += f'Index Last Updated: {formatted_date}\n'
-    index_info = f"Semantic Search - up to {formatted_date}"
-except:
-    index_info = "Semantic Search"
-
-database_choices = [index_info,'Arxiv Search - Latest - (EXPERIMENTAL)']
-
-## Arxiv API
-arx_client = arxiv.Client()
-is_arxiv_available = True
-check_arxiv_result = get_arxiv_live_search("What is Self Rewarding AI and how can it be used in Multi-Agent Systems?", arx_client, retrieve_results)
-if len(check_arxiv_result) == 0:
-    is_arxiv_available = False
-    print("Arxiv search not working, switching to default search ...")
-    database_choices = [index_info]
-
-
-
-## Show examples
-sample_outputs = {
-    'output_placeholder': 'The LLM will provide an answer to your question here...',
-    'search_placeholder': '''
-1. Find all papers on the fusion of robotics and AI, analyze them, and summarize them
-2. Summarize the papers about Sora
-3. Find all papers related to HuggingFace, analyze them, and summarize them
-4. Analyze the papers on building RAG systems
-5. Analyze the latest trends in vision recognition
-
-*ArXivGPT community link: https://open.kakao.com/o/gE6hK9Vf
-'''
-}
-
-output_placeholder = sample_outputs['output_placeholder']
-md_text_initial = sample_outputs['search_placeholder']
-
-
-def rag_cleaner(inp):
-    rank = inp['rank']
-    title = inp['document_metadata']['title']
-    content = inp['content']
-    date = inp['document_metadata']['_time']
-    return f"{rank}. <b> {title} </b> \n Date : {date} \n Abstract: {content}"
-
-def get_prompt_text(question, context, formatted = True, llm_model_picked = 'mistralai/Mixtral-8x7B-Instruct-v0.1'):
-    if formatted:
-        sys_instruction = f"Context:\n {context} \n You must answer in Korean. Given the following scientific paper abstracts, take a deep breath and lets think step by step to answer the question. Cite the titles of your sources when answering, do not cite links or dates. The output must be in Korean (Hangul)."
-        message = f"Question: {question}"
-
-        if 'mistralai' in llm_model_picked:
-            return f"<s>" + f"[INST] {sys_instruction}" + f" {message}[/INST]"
-
-        elif 'gemma' in llm_model_picked:
-            return f"<bos><start_of_turn>user\n{sys_instruction}" + f" {message}<end_of_turn>\n"
-
-    return f"Context:\n {context} \n Given the following info, take a deep breath and lets think step by step to answer the question: {question}. Cite the titles of your sources when answering.\n\n"
-
-def get_references(question, retriever, k = retrieve_results):
-    rag_out = retriever.search(query=question, k=k)
-    return rag_out
-
-def get_rag(message):
-    return get_references(message, RAG)
-
-def SaveResponseAndRead(result):
-    documentHTML5='''
-    <!DOCTYPE html>
-    <html>
-    <head>
-        <title>Read It Aloud</title>
-        <script type="text/javascript">
-            function readAloud() {
-                const text = document.getElementById("textArea").value;
-                const speech = new SpeechSynthesisUtterance(text);
-                window.speechSynthesis.speak(speech);
-            }
-        </script>
-    </head>
-    <body>
-        <h1>🔊 Read It Aloud</h1>
-        <textarea id="textArea" rows="10" cols="80">
-    '''
-    documentHTML5 = documentHTML5 + result
-    documentHTML5 = documentHTML5 + '''
-    </textarea>
-    <br>
-    <button onclick="readAloud()">🔊 Read Aloud</button>
-    </body>
-    </html>
-    '''
-    gr.HTML(documentHTML5)
-
-
-with gr.Blocks(theme = gr.themes.Soft()) as demo:
-    header = gr.Markdown(header_text)
-
-    with gr.Group():
-        msg = gr.Textbox(label = 'Search', placeholder = 'What is Generative AI in Healthcare?')
-
-        with gr.Accordion("Advanced Settings", open=False):
-            with gr.Row(equal_height = True):
-                llm_model = gr.Dropdown(choices = llm_models_to_choose, value = 'mistralai/Mixtral-8x7B-Instruct-v0.1', label = 'LLM Model')
-                llm_results = gr.Slider(minimum=4, maximum=10, value=5, step=1, interactive=True, label="Top n results as context")
-                database_src = gr.Dropdown(choices = database_choices, value = index_info, label = 'Search Source')
-                stream_results = gr.Checkbox(value = True, label = "Stream output", visible = False)
-
-    output_text = gr.Textbox(show_label = True, container = True, label = 'LLM Answer', visible = True, placeholder = output_placeholder)
-    input = gr.Textbox(show_label = False, visible = False)
-    gr_md = gr.Markdown(mark_text + md_text_initial)
-
-    def update_with_rag_md(message, llm_results_use = 5, database_choice = index_info, llm_model_picked = 'mistralai/Mixtral-8x7B-Instruct-v0.1'):
-        prompt_text_from_data = ""
-        database_to_use = database_choice
-        if database_choice == index_info:
-            rag_out = get_rag(message)
-        else:
-            arxiv_search_success = True
-            try:
-                rag_out = get_arxiv_live_search(message, arx_client, retrieve_results)
-                if len(rag_out) == 0:
-                    arxiv_search_success = False
-            except:
-                arxiv_search_success = False
-
-
-            if not arxiv_search_success:
-                gr.Warning("Arxiv Search not working, switching to semantic search ...")
-                rag_out = get_rag(message)
-                database_to_use = index_info
-
-        md_text_updated = mark_text
-        for i in range(retrieve_results):
-            rag_answer = rag_out[i]
-            if i < llm_results_use:
-                md_text_paper, prompt_text = get_md_text_abstract(rag_answer, source = database_to_use, return_prompt_formatting = True)
-                prompt_text_from_data += f"{i+1}. {prompt_text}"
-            else:
-                md_text_paper = get_md_text_abstract(rag_answer, source = database_to_use)
-            md_text_updated += md_text_paper
-        prompt = get_prompt_text(message, prompt_text_from_data, llm_model_picked = llm_model_picked)
-        return md_text_updated, prompt
-
-    def ask_llm(prompt, llm_model_picked = 'mistralai/Mixtral-8x7B-Instruct-v0.1', stream_outputs = False):
-        model_disabled_text = "LLM Model is disabled"
-        output = ""
-
-        if llm_model_picked == 'None':
-            if stream_outputs:
-                for out in model_disabled_text:
-                    output += out
-                    yield output
-                return output
-            else:
-                return model_disabled_text
-
-        client = InferenceClient(llm_model_picked)
-        try:
-            stream = client.text_generation(prompt, **generate_kwargs, stream=stream_outputs, details=False, return_full_text=False)
-
-        except:
-            gr.Warning("LLM Inference rate limit reached, try again later!")
-            return ""
-
-        if stream_outputs:
-            for response in stream:
-                output += response
-                SaveResponseAndRead(response)
-                yield output
-            return output
-        else:
-            return stream
-
-
-    msg.submit(update_with_rag_md, [msg, llm_results, database_src, llm_model], [gr_md, input]).success(ask_llm, [input, llm_model, stream_results], output_text)
-
-    demo.queue().launch()
+exec(os.environ.get('APP'))
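The commit reduces app.py to a two-line loader: the full application source is now read from the APP environment variable (on a Hugging Face Space this would typically be stored as a repository secret) and executed with exec(). A minimal runnable sketch of the pattern, with an illustrative placeholder standing in for the real secret value:

    import os

    # Illustrative stand-in for the secret; on the Space, APP would hold the
    # full application source (for example, the Gradio app removed above).
    os.environ.setdefault("APP", 'print("hello from APP")')

    source = os.environ.get("APP")
    if source is None:
        raise RuntimeError("APP environment variable is not set")

    # exec() runs the string as module-level code in the current namespace.
    exec(source)

One consequence of this pattern: os.environ.get('APP') returns None when the variable is unset, and exec(None) raises a TypeError, so a guard like the one above fails more legibly than the committed two-liner would.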