capradeepgujaran committed on
Commit
bcc31db
β€’
1 Parent(s): 1ccbdc2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -84
app.py CHANGED
@@ -4,6 +4,8 @@ import logging
4
  import gradio as gr
5
  import PyPDF2
6
  from pdf2image import convert_from_path
 
 
7
  import docx
8
  from llama_index.core import VectorStoreIndex, Document
9
  from llama_index.embeddings.openai import OpenAIEmbedding
@@ -20,44 +22,52 @@ logging.basicConfig(level=logging.INFO, format='%(asctime)s | %(levelname)s | %(
20
  # Load environment variables from .env file
21
  load_dotenv()
22
 
 
 
 
23
  # Initialize global variables
24
  vector_index = None
25
  query_log = []
26
  sentence_model = SentenceTransformer('all-MiniLM-L6-v2')
27
 
28
- def extract_text_from_pdf(pdf_path):
29
  text = ""
30
  image_count = 0
31
  total_pages = 0
32
-
33
  try:
34
  with open(pdf_path, 'rb') as file:
35
  pdf_reader = PyPDF2.PdfReader(file)
36
  total_pages = len(pdf_reader.pages)
37
-
38
  for page_num, page in enumerate(pdf_reader.pages, 1):
39
  page_text = page.extract_text()
40
- if page_text.strip():
41
- text += page_text
 
 
 
 
 
 
 
42
  else:
43
- image_count += 1
44
- text += f"[Image detected on page {page_num}]\n"
45
-
46
  except Exception as e:
47
  logging.error(f"Error processing PDF {pdf_path}: {str(e)}")
48
  return f"[Error processing PDF: {str(e)}]\n"
49
-
50
  if image_count == total_pages:
51
  summary = f"This document consists of {total_pages} page(s) of images.\n"
52
- summary += "No text could be extracted. Consider manual review or image processing techniques.\n"
53
  summary += f"File path: {pdf_path}\n"
54
  return summary
55
  elif image_count > 0:
56
  text = f"This document contains both text and images.\n" + \
57
  f"Total pages: {total_pages}\n" + \
58
  f"Pages with images: {image_count}\n" + \
59
- f"Extracted text:\n\n" + text
60
-
61
  return text
62
 
63
  def load_docx_file(docx_path):
@@ -76,9 +86,9 @@ def load_txt_file(txt_path):
76
  logging.error(f"Error processing TXT {txt_path}: {str(e)}")
77
  return f"[Error processing TXT: {str(e)}]\n"
78
 
79
- def load_file_based_on_extension(file_path):
80
  if file_path.lower().endswith('.pdf'):
81
- return extract_text_from_pdf(file_path)
82
  elif file_path.lower().endswith('.docx'):
83
  return load_docx_file(file_path)
84
  elif file_path.lower().endswith('.txt'):
@@ -86,7 +96,7 @@ def load_file_based_on_extension(file_path):
86
  else:
87
  raise ValueError(f"Unsupported file format: {file_path}")
88
 
89
- def process_upload(api_key, files):
90
  global vector_index
91
 
92
  if not api_key:
@@ -101,7 +111,7 @@ def process_upload(api_key, files):
101
 
102
  for file_path in files:
103
  try:
104
- text = load_file_based_on_extension(file_path)
105
  if "This document consists of" in text and "page(s) of images" in text:
106
  image_heavy_docs.append(os.path.basename(file_path))
107
  documents.append(Document(text=text))
@@ -127,69 +137,6 @@ def process_upload(api_key, files):
127
  else:
128
  return f"No valid documents were indexed. Errors: {'; '.join(error_messages)}", None
129
 
130
- def calculate_similarity(response, ground_truth):
131
- # Encode the response and ground truth
132
- response_embedding = sentence_model.encode(response, convert_to_tensor=True)
133
- truth_embedding = sentence_model.encode(ground_truth, convert_to_tensor=True)
134
-
135
- # Explicitly normalize the embeddings (should result in unit vectors)
136
- response_embedding = response_embedding / response_embedding.norm(p=2)
137
- truth_embedding = truth_embedding / truth_embedding.norm(p=2)
138
-
139
- # Calculate cosine similarity using sklearn's cosine_similarity function
140
- similarity = cosine_similarity(response_embedding.reshape(1, -1), truth_embedding.reshape(1, -1))[0][0]
141
- return similarity * 100 # Convert to percentage
142
-
143
-
144
- def query_app(query, model_name, use_similarity_check, openai_api_key):
145
- global vector_index, query_log
146
-
147
- if vector_index is None:
148
- logging.error("No documents indexed yet. Please upload documents first.")
149
- return "No documents indexed yet. Please upload documents first.", None
150
-
151
- if not openai_api_key:
152
- logging.error("No OpenAI API Key provided.")
153
- return "Please provide a valid OpenAI API Key.", None
154
-
155
- try:
156
- llm = OpenAI(model=model_name, api_key=openai_api_key)
157
- except Exception as e:
158
- logging.error(f"Error initializing the OpenAI model: {e}")
159
- return f"Error initializing the OpenAI model: {e}", None
160
-
161
- response_synthesizer = get_response_synthesizer(llm=llm)
162
- query_engine = vector_index.as_query_engine(llm=llm, response_synthesizer=response_synthesizer)
163
-
164
- try:
165
- response = query_engine.query(query)
166
- except Exception as e:
167
- logging.error(f"Error during query processing: {e}")
168
- return f"Error during query processing: {e}", None
169
-
170
- generated_response = response.response
171
- query_log.append({
172
- "query_id": str(len(query_log) + 1),
173
- "query": query,
174
- "gt_answer": "Placeholder ground truth answer",
175
- "response": generated_response,
176
- "retrieved_context": [{"text": doc.text} for doc in response.source_nodes]
177
- })
178
-
179
- metrics = {}
180
-
181
- if use_similarity_check:
182
- try:
183
- logging.info("Similarity check is enabled. Calculating similarity.")
184
- similarity = calculate_similarity(generated_response, "Placeholder ground truth answer")
185
- metrics['similarity'] = similarity
186
- logging.info(f"Similarity calculated: {similarity}")
187
- except Exception as e:
188
- logging.error(f"Error during similarity calculation: {e}")
189
- metrics['error'] = f"Error during similarity calculation: {e}"
190
-
191
- return generated_response, metrics if use_similarity_check else None
192
-
193
  def main():
194
  with gr.Blocks(title="Document Processing App") as demo:
195
  gr.Markdown("# πŸ“„ Document Processing and Querying App")
@@ -201,12 +148,13 @@ def main():
201
 
202
  with gr.Row():
203
  file_upload = gr.File(label="Upload Files", file_count="multiple", type="filepath")
 
204
  upload_button = gr.Button("Upload and Index")
205
  upload_status = gr.Textbox(label="Status", interactive=False)
206
 
207
  upload_button.click(
208
  fn=process_upload,
209
- inputs=[api_key_input, file_upload],
210
  outputs=[upload_status]
211
  )
212
 
@@ -219,16 +167,14 @@ def main():
219
  value="gpt-4o",
220
  label="Select Model"
221
  )
222
- similarity_checkbox = gr.Checkbox(label="Use Similarity Check", value=False)
223
  query_button = gr.Button("Ask")
224
  with gr.Column():
225
  answer_output = gr.Textbox(label="Answer", interactive=False)
226
- metrics_output = gr.JSON(label="Metrics")
227
 
228
  query_button.click(
229
  fn=query_app,
230
- inputs=[query_input, model_dropdown, similarity_checkbox, api_key_input],
231
- outputs=[answer_output, metrics_output]
232
  )
233
 
234
  gr.Markdown("""
 
4
  import gradio as gr
5
  import PyPDF2
6
  from pdf2image import convert_from_path
7
+ import pytesseract
8
+ from PIL import Image
9
  import docx
10
  from llama_index.core import VectorStoreIndex, Document
11
  from llama_index.embeddings.openai import OpenAIEmbedding
 
22
  # Load environment variables from .env file
23
  load_dotenv()
24
 
25
import subprocess

# Discover the OCR language packs installed with the local Tesseract binary.
# `tesseract --list-langs` prints a header line first, so it is dropped.
# Unlike os.popen with a shell string, subprocess.run([...]) avoids the shell
# and lets us detect a missing/broken binary; fall back to English so the
# language dropdown is still usable and the app still starts.
try:
    _tesseract_proc = subprocess.run(
        ['tesseract', '--list-langs'],
        capture_output=True, text=True, check=True,
    )
    langs = _tesseract_proc.stdout.split('\n')[1:-1]
except (OSError, subprocess.CalledProcessError):
    langs = ['eng']
27
+
28
  # Initialize global variables
29
  vector_index = None
30
  query_log = []
31
  sentence_model = SentenceTransformer('all-MiniLM-L6-v2')
32
 
33
def extract_text_from_pdf(pdf_path, lang=None):
    """Extract text from a PDF, falling back to OCR for image-only pages.

    Args:
        pdf_path: Path to the PDF file to process.
        lang: Optional Tesseract language code (e.g. 'eng'). Any falsy
            value (None, '', []) defers to Tesseract's default language.

    Returns:
        The extracted text. If the document mixes text and image pages, a
        descriptive header is prepended; if every page was image-only, the
        summary is followed by the OCR output. On failure an error-marker
        string is returned instead of raising, matching the other loaders.
    """
    text = ""
    image_count = 0
    total_pages = 0

    try:
        with open(pdf_path, 'rb') as file:
            pdf_reader = PyPDF2.PdfReader(file)
            total_pages = len(pdf_reader.pages)

            for page_num, page in enumerate(pdf_reader.pages, 1):
                # extract_text() can return None on some PyPDF2 versions;
                # normalize so .strip() below cannot crash.
                page_text = page.extract_text() or ""

                if page_text.strip():
                    text += page_text
                else:
                    # No embedded text: treat the page as an image and OCR it.
                    images = convert_from_path(pdf_path, first_page=page_num, last_page=page_num)
                    for image in images:
                        # Falsy lang lets Tesseract pick its default language
                        # (the original's `lang == []` check was dead code:
                        # lang is a string from the UI dropdown).
                        text += pytesseract.image_to_string(image, lang=lang or None)
                    image_count += 1
                    text += f"\n[OCR applied on image detected on page {page_num}]\n"
    except Exception as e:
        logging.error(f"Error processing PDF {pdf_path}: {str(e)}")
        return f"[Error processing PDF: {str(e)}]\n"

    # Guard total_pages > 0 so an empty PDF is not reported as "all images".
    if total_pages > 0 and image_count == total_pages:
        summary = f"This document consists of {total_pages} page(s) of images.\n"
        summary += "No text could be extracted directly. OCR was applied to images.\n"
        summary += f"File path: {pdf_path}\n"
        # Bug fix: the original returned only the summary here, silently
        # discarding all the OCR text extracted above. Keep both; callers
        # that substring-match the summary header still work.
        return summary + "\n" + text
    elif image_count > 0:
        text = f"This document contains both text and images.\n" + \
               f"Total pages: {total_pages}\n" + \
               f"Pages with images: {image_count}\n" + \
               f"Extracted text (including OCR):\n\n" + text

    return text
72
 
73
  def load_docx_file(docx_path):
 
86
  logging.error(f"Error processing TXT {txt_path}: {str(e)}")
87
  return f"[Error processing TXT: {str(e)}]\n"
88
 
89
+ def load_file_based_on_extension(file_path, lang=None):
90
  if file_path.lower().endswith('.pdf'):
91
+ return extract_text_from_pdf(file_path, lang)
92
  elif file_path.lower().endswith('.docx'):
93
  return load_docx_file(file_path)
94
  elif file_path.lower().endswith('.txt'):
 
96
  else:
97
  raise ValueError(f"Unsupported file format: {file_path}")
98
 
99
+ def process_upload(api_key, files, lang):
100
  global vector_index
101
 
102
  if not api_key:
 
111
 
112
  for file_path in files:
113
  try:
114
+ text = load_file_based_on_extension(file_path, lang)
115
  if "This document consists of" in text and "page(s) of images" in text:
116
  image_heavy_docs.append(os.path.basename(file_path))
117
  documents.append(Document(text=text))
 
137
  else:
138
  return f"No valid documents were indexed. Errors: {'; '.join(error_messages)}", None
139
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
140
  def main():
141
  with gr.Blocks(title="Document Processing App") as demo:
142
  gr.Markdown("# πŸ“„ Document Processing and Querying App")
 
148
 
149
  with gr.Row():
150
  file_upload = gr.File(label="Upload Files", file_count="multiple", type="filepath")
151
+ lang_dropdown = gr.Dropdown(choices=langs, label="Select OCR Language", value='eng')
152
  upload_button = gr.Button("Upload and Index")
153
  upload_status = gr.Textbox(label="Status", interactive=False)
154
 
155
  upload_button.click(
156
  fn=process_upload,
157
+ inputs=[api_key_input, file_upload, lang_dropdown],
158
  outputs=[upload_status]
159
  )
160
 
 
167
  value="gpt-4o",
168
  label="Select Model"
169
  )
 
170
  query_button = gr.Button("Ask")
171
  with gr.Column():
172
  answer_output = gr.Textbox(label="Answer", interactive=False)
 
173
 
174
  query_button.click(
175
  fn=query_app,
176
+ inputs=[query_input, model_dropdown, api_key_input],
177
+ outputs=[answer_output]
178
  )
179
 
180
  gr.Markdown("""