wholewhale committed • Commit d2fbc5e • Parent: 6989cae
fixing not found problem
app.py
CHANGED
@@ -1,188 +1,105 @@
 import urllib.request
-import fitz
 import re
 import numpy as np
 import tensorflow_hub as hub
-import openai
-import gradio as gr
-import os
 from sklearn.neighbors import NearestNeighbors

 def download_pdf(url, output_path):
-    …

 def preprocess(text):
     text = text.replace('\n', ' ')
-    text = re.sub('\s+', ' ', text)
     return text

-
 def pdf_to_text(path, start_page=1, end_page=None):
-    …
-    doc.close()
-    return text_list

 def text_to_chunks(texts, word_length=150, start_page=1):
-    text_toks = [t.split(' ') for t in texts]
-    page_nums = []
     chunks = []
-
-    for idx, words in enumerate(text_toks):
         for i in range(0, len(words), word_length):
-            chunk = words[i:i+word_length]
-            if (i+word_length) > len(words) and (len(chunk) < word_length) and (
-                len(text_toks) != (idx+1)):
-                text_toks[idx+1] = chunk + text_toks[idx+1]
-                continue
             chunk = ' '.join(chunk).strip()
-            chunk = f'[Page no. {idx+start_page}]…
             chunks.append(chunk)
     return chunks

-
 class SemanticSearch:
-
     def __init__(self):
         self.use = hub.load('https://tfhub.dev/google/universal-sentence-encoder/4')
         self.fitted = False
-
-    def fit(self, data, batch=1000, n_neighbors=5):
         self.data = data
-        self.embeddings = self.get_text_embedding(data, batch=batch)
-        self.nn = NearestNeighbors(n_neighbors=n_neighbors)
         self.nn.fit(self.embeddings)
         self.fitted = True
-
-    def __call__(self, text, return_data=True):
-        inp_emb = self.use([text])
-        neighbors = self.nn.kneighbors(inp_emb, return_distance=False)[0]
-
-        if return_data:
-            return [self.data[i] for i in neighbors]
-        else:
-            return neighbors
-
-    def get_text_embedding(self, texts, batch=1000):
-        embeddings = []
-        for i in range(0, len(texts), batch):
-            text_batch = texts[i:(i+batch)]
-            emb_batch = self.use(text_batch)
-            embeddings.append(emb_batch)
-        embeddings = np.vstack(embeddings)
-        return embeddings
-
-
-def load_recommender(path, start_page=1):
-    global recommender
-    texts = pdf_to_text(path, start_page=start_page)
-    chunks = text_to_chunks(texts, start_page=start_page)
-    recommender.fit(chunks)
-    return 'Corpus Loaded.'
-

-def generate_text(prompt, engine):
-    …
-    completions = openai.Completion.create(
-        engine=engine,
-        prompt=prompt,
-        max_tokens=512,
-        n=1,
-        stop=None,
-        temperature=0.7,
-    )
-    message = completions.choices[0].text
-    return message
-
-
-def generate_answer(question):
-    topn_chunks = recommender(question)
-    prompt = ""
-    prompt += 'search results:\n\n'
-    for c in topn_chunks:
-        prompt += c + '\n\n'

-    …
-        "with the same name, create separate answers for each. Only include information found in the results and "\
-        "don't add any additional information. Make sure the answer is correct and don't output false content. "\
-        "If the text does not relate to the query, simply state 'Found Nothing'. Ignore outlier "\
-        "search results which has nothing to do with the question. Only answer what is asked. The "\
-        "answer should be short and concise. \n\nQuery: {question}\nAnswer: "
-
-    prompt += f"Query: {question}\nAnswer:"
-    answer = generate_text(prompt,"text-davinci-003")
-    return answer
-
-
-def question_answer(url, file, question):
-    if url.strip() == '' and file == None:
-        return '[ERROR]: Both URL and PDF is empty. Provide atleast one.'
-
-    if url.strip() != '' and file != None:
-        return '[ERROR]: Both URL and PDF is provided. Please provide only one (eiter URL or PDF).'
-
-    if url.strip() != '':
-        glob_url = url
-        download_pdf(glob_url, 'corpus.pdf')
-        load_recommender('corpus.pdf')
-
-    else:
-        old_file_name = file.name
-        file_name = file.name
-        file_name = file_name[:-12] + file_name[-4:]
-        os.rename(old_file_name, file_name)
-        load_recommender(file_name)
-
-    if question.strip() == '':
-        return '[ERROR]: Question field is empty'
-
-    return generate_answer(question)
-

 recommender = SemanticSearch()

-title = …
-description = …
-
-with gr.Blocks() as demo:
-    gr.Markdown(f'<center><h1>{title}</h1></center>')
-    gr.Markdown(description)
-
-    with gr.Row():
-        …
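Two lines of the removed generate_text body are lost in the diff view. Below is a reconstruction sketch, not the verbatim original: it assumes the legacy (pre-1.0) openai SDK that the removed `import openai` implies, and the default engine value is inferred from the call site generate_text(prompt,"text-davinci-003"). Only the keyword arguments and the last two lines survive verbatim.

import openai  # legacy pre-1.0 SDK; assumes openai.api_key is set elsewhere

def generate_text(prompt, engine="text-davinci-003"):
    # The Completion.create call itself is inferred; its kwargs
    # survive verbatim in the diff above.
    completions = openai.Completion.create(
        engine=engine,
        prompt=prompt,
        max_tokens=512,
        n=1,
        stop=None,
        temperature=0.7,
    )
    message = completions.choices[0].text
    return message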
 import urllib.request
+import fitz  # PyMuPDF
 import re
 import numpy as np
 import tensorflow_hub as hub
 from sklearn.neighbors import NearestNeighbors
+import os
+import gradio as gr

 def download_pdf(url, output_path):
+    try:
+        urllib.request.urlretrieve(url, output_path)
+        return True
+    except Exception as e:
+        print(f"Error downloading PDF: {e}")
+        return False

 def preprocess(text):
     text = text.replace('\n', ' ')
+    text = re.sub('\s+', ' ', text).strip()
     return text

 def pdf_to_text(path, start_page=1, end_page=None):
+    try:
+        doc = fitz.open(path)
+        total_pages = doc.page_count

+        if end_page is None:
+            end_page = total_pages

+        text_list = []

+        for i in range(start_page - 1, end_page):
+            page = doc.load_page(i)
+            text = page.get_text("text")
+            text = preprocess(text)
+            text_list.append(text)

+        doc.close()
+        return text_list
+    except Exception as e:
+        print(f"Error in PDF to text conversion: {e}")
+        return None

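Here fitz is the import name of PyMuPDF, so fitz.open, doc.page_count, doc.load_page, and page.get_text("text") are its standard API. A minimal sketch of exercising the new extractor on its own, assuming a local sample.pdf (a hypothetical file name, not from the source):

texts = pdf_to_text("sample.pdf")  # one preprocessed string per page, or None on failure
if texts is not None:
    print(f"extracted {len(texts)} pages")
    print(texts[0][:200])  # first 200 characters of page 1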
 def text_to_chunks(texts, word_length=150, start_page=1):
+    text_tokens = [t.split(' ') for t in texts]
     chunks = []
+
+    for idx, words in enumerate(text_tokens):
         for i in range(0, len(words), word_length):
+            chunk = words[i:i + word_length]
             chunk = ' '.join(chunk).strip()
+            chunk = f'[Page no. {idx + start_page}] ' + chunk
             chunks.append(chunk)
+
     return chunks

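Each chunk now carries a [Page no. N] prefix, so search hits stay traceable to their source page. A quick sketch of the output shape, using made-up page strings:

pages = [' '.join(['w'] * 200), 'a short second page']
chunks = text_to_chunks(pages, word_length=150, start_page=1)
print(len(chunks))     # 3: the 200-word page splits into two chunks, the short page gives one
print(chunks[0][:14])  # '[Page no. 1] w'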
 class SemanticSearch:
     def __init__(self):
         self.use = hub.load('https://tfhub.dev/google/universal-sentence-encoder/4')
         self.fitted = False
+
+    def fit(self, data):
         self.data = data
+        self.embeddings = self.use(data)
+        self.nn = NearestNeighbors(n_neighbors=5)
         self.nn.fit(self.embeddings)
         self.fitted = True

+    def __call__(self, text):
+        if not self.fitted:
+            return "Model not fitted yet."

+        query_embedding = self.use([text])
+        neighbors = self.nn.kneighbors(query_embedding, return_distance=False)[0]
+        return [self.data[i] for i in neighbors]

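The rewritten fit embeds the whole corpus in a single Universal Sentence Encoder call and indexes it with scikit-learn's NearestNeighbors. One caveat: with n_neighbors hard-coded to 5, querying an index of fewer than five chunks will raise in kneighbors. A standalone sketch with toy strings (illustrative, not from the source):

search = SemanticSearch()  # downloads the USE model from TF Hub on first run
search.fit([f"[Page no. 1] note about topic {i}" for i in range(10)])
for chunk in search("which note covers topic 3?"):
    print(chunk)  # the five nearest chunks by embedding distance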
 recommender = SemanticSearch()

+def gui(url, question):
+    if url.strip():
+        if not download_pdf(url, "temp.pdf"):
+            return "Failed to download PDF."

+        texts = pdf_to_text("temp.pdf")
+        if texts is None:
+            return "Failed to extract text from PDF."
+
+        chunks = text_to_chunks(texts)
+        recommender.fit(chunks)
+    else:
+        return "Please provide a valid URL."
+
+    if question.strip():
+        results = recommender(question)
+        return results
+    else:
+        return "Please enter a question."
+
+iface = gr.Interface(
+    fn=gui,
+    inputs=["text", "text"],
+    outputs="text"
+)
+iface.launch()
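Because gui is a plain function, the pipeline can be smoke-tested in isolation, before calling iface.launch(); the URL below is a placeholder, not one from the source:

answer = gui("https://example.com/paper.pdf", "What problem does the paper address?")
print(answer)

Note the behavior change in this commit: gui returns the top matching chunks directly (rendered as text by gr.Interface) rather than an OpenAI-generated answer, and each call re-downloads the PDF to temp.pdf and refits the index.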