Souvik Biswas committed on
Commit
a76b236
1 Parent(s): 1ea460d

/home/vscode/rickB/app.py

Files changed (1)
  1. app.py +262 -0
app.py ADDED
@@ -0,0 +1,262 @@
import gradio as gr
import spaces
import json
from transformers import AutoModel, AutoTokenizer, Qwen2VLForConditionalGeneration, AutoProcessor
import torch
from PIL import Image
import numpy as np
import os
import base64
import io
import uuid
import tempfile
import time
import shutil
from pathlib import Path
import tiktoken
import verovio

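# GOT-OCR2.0 ships custom modeling code, so trust_remote_code is required.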
model_name = "ucaslcl/GOT-OCR2_0"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModel.from_pretrained(model_name, trust_remote_code=True, low_cpu_mem_usage=True, device_map='cuda', use_safetensors=True).eval().cuda()

UPLOAD_FOLDER = "./uploads"
RESULTS_FOLDER = "./results"

# Create the working directories on startup if they are missing.
for folder in [UPLOAD_FOLDER, RESULTS_FOLDER]:
    if not os.path.exists(folder):
        os.makedirs(folder)

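# Encode a PIL image as a base64 PNG string for inline HTML embedding.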
def image_to_base64(image):
    buffered = io.BytesIO()
    image.save(buffered, format="PNG")
    return base64.b64encode(buffered.getvalue()).decode()

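# Load Qwen2-VL-2B-Instruct, used by the "English + Hindi(Qwen2-VL)" mode.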
q_model_name = "Qwen/Qwen2-VL-2B-Instruct"
q_model = Qwen2VLForConditionalGeneration.from_pretrained(q_model_name, torch_dtype="auto").cuda().eval()
q_processor = AutoProcessor.from_pretrained(q_model_name, trust_remote_code=True)

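# Run Qwen2-VL on a single image with an OCR-style prompt and return the
# decoded text (or an error message).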
def get_qwen_op(image_file, model, processor):
    try:
        image = Image.open(image_file).convert('RGB')
        conversation = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                    },
                    {
                        "type": "text",
                        "text": "You are an accurate OCR engine. From the given image, extract the Hindi and other text."
                    }
                ]
            }
        ]
        text_prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
        inputs = processor(text=[text_prompt], images=[image], padding=True, return_tensors="pt").to("cuda")
        # Cast floating-point tensors (pixel values) to float32; integer tensors
        # such as input_ids are left untouched.
        inputs = {k: v.to(torch.float32) if torch.is_floating_point(v) else v for k, v in inputs.items()}

        generation_config = {
            "max_new_tokens": 1089,
            "do_sample": False,
            "top_k": 20,
            "top_p": 0.90,
            "temperature": 0.4,
            "pad_token_id": processor.tokenizer.pad_token_id,
            "eos_token_id": processor.tokenizer.eos_token_id,
        }

        output_ids = model.generate(**inputs, **generation_config)
        # Strip the prompt tokens so only newly generated text is decoded.
        if 'input_ids' in inputs:
            generated_ids = output_ids[:, inputs['input_ids'].shape[1]:]
        else:
            generated_ids = output_ids

        output_text = processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)

        return output_text[0] if output_text else "No text extracted from the image."

    except Exception as e:
        return f"An error occurred: {str(e)}"

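# Dispatch one OCR request: copy the upload into UPLOAD_FOLDER, run the
# selected GOT/Qwen mode, and return (text, optional rendered HTML).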
@spaces.GPU
def run_GOT(image, got_mode, fine_grained_mode="", ocr_color="", ocr_box=""):
    unique_id = str(uuid.uuid4())
    image_path = os.path.join(UPLOAD_FOLDER, f"{unique_id}.png")
    result_path = os.path.join(RESULTS_FOLDER, f"{unique_id}.html")

    shutil.copy(image, image_path)

    try:
        if got_mode == "plain texts OCR":
            res = model.chat(tokenizer, image_path, ocr_type='ocr')
            return res, None
        elif got_mode == "format texts OCR":
            res = model.chat(tokenizer, image_path, ocr_type='format', render=True, save_render_file=result_path)
        elif got_mode == "plain multi-crop OCR":
            res = model.chat_crop(tokenizer, image_path, ocr_type='ocr')
            return res, None
        elif got_mode == "format multi-crop OCR":
            res = model.chat_crop(tokenizer, image_path, ocr_type='format', render=True, save_render_file=result_path)
        elif got_mode == "plain fine-grained OCR":
            res = model.chat(tokenizer, image_path, ocr_type='ocr', ocr_box=ocr_box, ocr_color=ocr_color)
            return res, None
        elif got_mode == "format fine-grained OCR":
            res = model.chat(tokenizer, image_path, ocr_type='format', ocr_box=ocr_box, ocr_color=ocr_color, render=True, save_render_file=result_path)
        elif got_mode == "English + Hindi(Qwen2-VL)":
            res = get_qwen_op(image_path, q_model, q_processor)
            return res, None
        # res_markdown = f"$$ {res} $$"
        res_markdown = res

        if "format" in got_mode and os.path.exists(result_path):
            with open(result_path, 'r') as f:
                html_content = f.read()
            # Embed the rendered result as a base64 data URI so it can be shown
            # in an iframe and offered as a download without a server round-trip.
            encoded_html = base64.b64encode(html_content.encode('utf-8')).decode('utf-8')
            iframe_src = f"data:text/html;base64,{encoded_html}"
            iframe = f'<iframe src="{iframe_src}" width="100%" height="600px"></iframe>'
            download_link = f'<a href="data:text/html;base64,{encoded_html}" download="result_{unique_id}.html">Download Full Result</a>'
            return res_markdown, f"{download_link}<br>{iframe}"
        else:
            return res_markdown, None
    except Exception as e:
        return f"Error: {str(e)}", None
    finally:
        # Remove the temporary copy of the upload regardless of outcome.
        if os.path.exists(image_path):
            os.remove(image_path)

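# Show the fine-grained controls only when a fine-grained mode is selected.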
def task_update(task):
    if "fine-grained" in task:
        return [
            gr.update(visible=True),
            gr.update(visible=False),
            gr.update(visible=False),
        ]
    else:
        return [
            gr.update(visible=False),
            gr.update(visible=False),
            gr.update(visible=False),
        ]

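# Toggle between the color picker and the box input for fine-grained OCR.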
def fine_grained_update(task):
    if task == "box":
        return [
            gr.update(visible=False, value=""),
            gr.update(visible=True),
        ]
    elif task == 'color':
        return [
            gr.update(visible=True),
            gr.update(visible=False, value=""),
        ]

def search_in_text(text, keywords):
    """Searches for keywords within the text and highlights matches."""

    if not keywords:
        return text

    highlighted_text = text
    for keyword in keywords.split():
        highlighted_text = highlighted_text.replace(keyword, f"<mark>{keyword}</mark>")

    return highlighted_text

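# Delete uploads and rendered results older than one hour.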
def cleanup_old_files():
    current_time = time.time()
    for folder in [UPLOAD_FOLDER, RESULTS_FOLDER]:
        for file_path in Path(folder).glob('*'):
            if current_time - file_path.stat().st_mtime > 3600:  # 1 hour
                file_path.unlink()

title_html = """ OCR Multilingual (GOT OCR 2.0) """

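# Build the Gradio UI.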
with gr.Blocks() as demo:
    gr.HTML(title_html)
    gr.Markdown("""
    by Souvik Biswas

    ### Guidelines
    Upload your image below and select your preferred mode. Note that more characters may increase wait times.
    - **Plain Texts OCR & Format Texts OCR:** Use these modes for basic image-level OCR.
    - **Plain Multi-Crop OCR & Format Multi-Crop OCR:** Ideal for images with complex content, offering higher-quality results.
    - **Plain Fine-Grained OCR & Format Fine-Grained OCR:** These modes let you specify fine-grained regions of the image for more flexible OCR. Regions can be defined by coordinates or by color (red, blue, green, black, or white).

    """)

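    # Input controls: image upload and OCR mode selection.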
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type="filepath", label="upload your image")
            task_dropdown = gr.Dropdown(
                choices=[
                    "plain texts OCR",
                    "format texts OCR",
                    "plain multi-crop OCR",
                    "format multi-crop OCR",
                    "plain fine-grained OCR",
                    "format fine-grained OCR",
                    "English + Hindi(Qwen2-VL)"
                ],
                label="Choose one mode of GOT",
                value="plain texts OCR"
            )
            fine_grained_dropdown = gr.Dropdown(
                choices=["box", "color"],
                label="fine-grained type",
                visible=False
            )
            color_dropdown = gr.Dropdown(
                choices=["red", "green", "blue", "black", "white"],
                label="color list",
                visible=False
            )
            box_input = gr.Textbox(
                label="input box: [x1,y1,x2,y2]",
                placeholder="e.g., [0,0,100,100]",
                visible=False
            )
            submit_button = gr.Button("Submit")

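        # Output column for the recognized text.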
        with gr.Column():
            ocr_result = gr.Textbox(label="GOT output")
            # Nested interface: search keywords in the OCR output and highlight matches.
            iface = gr.Interface(
                fn=search_in_text,
                inputs=[
                    ocr_result,
                    gr.Textbox(label="Keywords",
                               placeholder="search keyword e.g., The",
                               visible=True)],
                outputs=gr.HTML(label="Search Results"),
                allow_flagging="never"
            )

    with gr.Column():
        # Note: this runs once at UI build time, when ocr_result.value is still
        # empty, so the JSON file is only written if a default value is set.
        if ocr_result.value:
            with open("ocr_result.json", "w") as json_file:
                json.dump({"text": ocr_result.value}, json_file)

    with gr.Column():
        gr.Markdown("**If you choose a format mode, the Mathpix-style result will be rendered automatically as follows:**")
        html_result = gr.HTML(label="rendered html", show_label=True)

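    # Wire up the event handlers: dropdown visibility updates and the submit action.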
    task_dropdown.change(
        task_update,
        inputs=[task_dropdown],
        outputs=[fine_grained_dropdown, color_dropdown, box_input]
    )
    fine_grained_dropdown.change(
        fine_grained_update,
        inputs=[fine_grained_dropdown],
        outputs=[color_dropdown, box_input]
    )

    submit_button.click(
        run_GOT,
        inputs=[image_input, task_dropdown, fine_grained_dropdown, color_dropdown, box_input],
        outputs=[ocr_result, html_result]
    )

if __name__ == "__main__":
    cleanup_old_files()
    demo.launch()