capradeepgujaran
committed on
Commit
•
0d8d4ff
1
Parent(s):
aaae51e
Update app.py
Browse files
app.py
CHANGED
@@ -137,6 +137,56 @@ def process_upload(api_key, files, lang):
|
|
137 |
else:
|
138 |
return f"No valid documents were indexed. Errors: {'; '.join(error_messages)}", None
|
139 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
140 |
def main():
|
141 |
with gr.Blocks(title="Document Processing App") as demo:
|
142 |
gr.Markdown("# π Document Processing and Querying App")
|
@@ -167,14 +217,16 @@ def main():
|
|
167 |
value="gpt-4o",
|
168 |
label="Select Model"
|
169 |
)
|
|
|
170 |
query_button = gr.Button("Ask")
|
171 |
with gr.Column():
|
172 |
answer_output = gr.Textbox(label="Answer", interactive=False)
|
|
|
173 |
|
174 |
query_button.click(
|
175 |
fn=query_app,
|
176 |
-
inputs=[query_input, model_dropdown, api_key_input],
|
177 |
-
outputs=[answer_output]
|
178 |
)
|
179 |
|
180 |
gr.Markdown("""
|
|
|
137 |
else:
|
138 |
return f"No valid documents were indexed. Errors: {'; '.join(error_messages)}", None
|
139 |
|
140 |
+
# This is the missing query_app function that needs to be defined
|
141 |
+
def query_app(query, model_name, use_similarity_check, openai_api_key):
|
142 |
+
global vector_index, query_log
|
143 |
+
|
144 |
+
if vector_index is None:
|
145 |
+
logging.error("No documents indexed yet. Please upload documents first.")
|
146 |
+
return "No documents indexed yet. Please upload documents first.", None
|
147 |
+
|
148 |
+
if not openai_api_key:
|
149 |
+
logging.error("No OpenAI API Key provided.")
|
150 |
+
return "Please provide a valid OpenAI API Key.", None
|
151 |
+
|
152 |
+
try:
|
153 |
+
llm = OpenAI(model=model_name, api_key=openai_api_key)
|
154 |
+
except Exception as e:
|
155 |
+
logging.error(f"Error initializing the OpenAI model: {e}")
|
156 |
+
return f"Error initializing the OpenAI model: {e}", None
|
157 |
+
|
158 |
+
response_synthesizer = get_response_synthesizer(llm=llm)
|
159 |
+
query_engine = vector_index.as_query_engine(llm=llm, response_synthesizer=response_synthesizer)
|
160 |
+
|
161 |
+
try:
|
162 |
+
response = query_engine.query(query)
|
163 |
+
except Exception as e:
|
164 |
+
logging.error(f"Error during query processing: {e}")
|
165 |
+
return f"Error during query processing: {e}", None
|
166 |
+
|
167 |
+
generated_response = response.response
|
168 |
+
query_log.append({
|
169 |
+
"query_id": str(len(query_log) + 1),
|
170 |
+
"query": query,
|
171 |
+
"gt_answer": "Placeholder ground truth answer",
|
172 |
+
"response": generated_response,
|
173 |
+
"retrieved_context": [{"text": doc.text} for doc in response.source_nodes]
|
174 |
+
})
|
175 |
+
|
176 |
+
metrics = {}
|
177 |
+
|
178 |
+
if use_similarity_check:
|
179 |
+
try:
|
180 |
+
logging.info("Similarity check is enabled. Calculating similarity.")
|
181 |
+
similarity = calculate_similarity(generated_response, "Placeholder ground truth answer")
|
182 |
+
metrics['similarity'] = similarity
|
183 |
+
logging.info(f"Similarity calculated: {similarity}")
|
184 |
+
except Exception as e:
|
185 |
+
logging.error(f"Error during similarity calculation: {e}")
|
186 |
+
metrics['error'] = f"Error during similarity calculation: {e}"
|
187 |
+
|
188 |
+
return generated_response, metrics if use_similarity_check else None
|
189 |
+
|
190 |
def main():
|
191 |
with gr.Blocks(title="Document Processing App") as demo:
|
192 |
gr.Markdown("# π Document Processing and Querying App")
|
|
|
217 |
value="gpt-4o",
|
218 |
label="Select Model"
|
219 |
)
|
220 |
+
similarity_checkbox = gr.Checkbox(label="Use Similarity Check", value=False)
|
221 |
query_button = gr.Button("Ask")
|
222 |
with gr.Column():
|
223 |
answer_output = gr.Textbox(label="Answer", interactive=False)
|
224 |
+
metrics_output = gr.JSON(label="Metrics")
|
225 |
|
226 |
query_button.click(
|
227 |
fn=query_app,
|
228 |
+
inputs=[query_input, model_dropdown, similarity_checkbox, api_key_input],
|
229 |
+
outputs=[answer_output, metrics_output]
|
230 |
)
|
231 |
|
232 |
gr.Markdown("""
|