import gradio as gr
import torch
from transformers import AutoModel, AutoTokenizer, AutoConfig
import os
import requests
import base64
import spaces
from loadimg import load_img
from PIL import Image
import numpy as np
title = """# 🙋🏻♂️Welcome to Tonic's🫴🏻📸GOT-OCR"""
description = """
The GOT-OCR model is a revolutionary step in the evolution of OCR systems, boasting 580M parameters and the ability to process various forms of "characters." It features a high-compression encoder and a long-context decoder, making it well-suited for both scene- and document-style images. The model also supports multi-page and dynamic resolution OCR for added practicality.
The model can output results in a variety of formats, including plain text, markdown, and even complex outputs like TikZ diagrams or molecular SMILES strings. Interactive OCR allows users to specify regions of interest for OCR using coordinates or colors.
## Features
- **Plain Text OCR**: Recognizes and extracts plain text from images.
- **Formatted Text OCR**: Extracts text while preserving its formatting (tables, formulas, etc.).
- **Fine-grained OCR**: Box-based and color-based OCR for precise text extraction from specific regions.
- **Multi-crop OCR**: Processes multiple cropped regions within an image.
- **Rendered Formatted OCR Results**: Outputs OCR results in markdown, TikZ, SMILES, or other formats with rendered formatting.
GOT-OCR-2.0 can handle:
- Plain text
- Math/molecular formulas
- Tables
- Charts
- Sheet music
- Geometric shapes
## How to Use
1. Select a task from the dropdown menu.
2. Upload an image.
3. (Optional) Fill in additional parameters based on the task.
4. Click **Process** to see the results.
---
### Join us:
🌟TeamTonic🌟 is always making cool demos! Join our active builders' 🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/qdfnvSPcqP) On 🤗Huggingface: [MultiTransformer](https://huggingface.co./MultiTransformer) On 🌐Github: [Tonic-AI](https://github.com/tonic-ai) & contribute to 🌟 [Build Tonic](https://git.tonic-ai.com/contribute). 🤗Big thanks to Yuvi Sharma and all the folks at Hugging Face for the community grant 🤗
"""
model_name = 'ucaslcl/GOT-OCR2_0'
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
config = AutoConfig.from_pretrained(model_name, trust_remote_code=True)
model = AutoModel.from_pretrained(
    model_name,
    trust_remote_code=True,
    low_cpu_mem_usage=True,
    device_map='cuda',
    use_safetensors=True,
    pad_token_id=tokenizer.eos_token_id,
)
model = model.eval().cuda()
model.config.pad_token_id = tokenizer.eos_token_id
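# Normalize the input (local path, URL, or PIL image) to an RGB PIL image.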
def load_image(image_file):
if isinstance(image_file, str):
if image_file.startswith('http') or image_file.startswith('https'):
return Image.open(requests.get(image_file, stream=True).raw).convert('RGB')
else:
return Image.open(image_file).convert('RGB')
else:
return image_file.convert('RGB')
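# @spaces.GPU allocates a ZeroGPU worker only for the duration of this call.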
@spaces.GPU
def process_image(image, task, ocr_type=None, ocr_box=None, ocr_color=None, render=False):
try:
img = load_image(image)
img_path = "/tmp/temp_image.png"
img.save(img_path)
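        # Dispatch to the model call that matches the selected task.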
if task == "Plain Text OCR":
res = model.chat(tokenizer, img_path, ocr_type='ocr')
elif task == "Format Text OCR":
res = model.chat(tokenizer, img_path, ocr_type='format')
elif task == "Fine-grained OCR (Box)":
res = model.chat(tokenizer, img_path, ocr_type=ocr_type, ocr_box=ocr_box)
elif task == "Fine-grained OCR (Color)":
res = model.chat(tokenizer, img_path, ocr_type=ocr_type, ocr_color=ocr_color)
elif task == "Multi-crop OCR":
res = model.chat_crop(tokenizer, image_file=img_path)
elif task == "Render Formatted OCR":
            os.makedirs('./results', exist_ok=True)
            res = model.chat(tokenizer, img_path, ocr_type='format', render=True, save_render_file='./results/demo.html')
            with open('./results/demo.html', 'r') as f:
                html_content = f.read()
            os.remove(img_path)
            return res, html_content

        # Clean up the temporary image before returning
        os.remove(img_path)
        return res, None
except Exception as e:
return str(e), None
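# Show or hide the optional input widgets depending on the selected task.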
def update_inputs(task):
if task == "Plain Text OCR" or task == "Format Text OCR" or task == "Multi-crop OCR":
return [gr.update(visible=False)] * 4
elif task == "Fine-grained OCR (Box)":
return [
gr.update(visible=True, choices=["ocr", "format"]),
gr.update(visible=True),
gr.update(visible=False),
gr.update(visible=False)
]
elif task == "Fine-grained OCR (Color)":
return [
gr.update(visible=True, choices=["ocr", "format"]),
gr.update(visible=False),
gr.update(visible=True, choices=["red", "green", "blue"]),
gr.update(visible=False)
]
elif task == "Render Formatted OCR":
return [gr.update(visible=False)] * 3 + [gr.update(visible=True)]
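# Thin wrapper that forwards the Gradio inputs to process_image.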
def ocr_demo(image, task, ocr_type, ocr_box, ocr_color):
res, html_content = process_image(image, task, ocr_type, ocr_box, ocr_color)
if html_content:
return res, html_content
return res, None
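# Build the Gradio interface.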
with gr.Blocks() as demo:
gr.Markdown(title)
gr.Markdown(description)
with gr.Row():
with gr.Column():
image_input = gr.Image(type="filepath", label="Input Image")
task_dropdown = gr.Dropdown(
choices=[
"Plain Text OCR",
"Format Text OCR",
"Fine-grained OCR (Box)",
"Fine-grained OCR (Color)",
"Multi-crop OCR",
"Render Formatted OCR"
],
label="Select Task",
value="Plain Text OCR"
)
ocr_type_dropdown = gr.Dropdown(
choices=["ocr", "format"],
label="OCR Type",
visible=False
)
ocr_box_input = gr.Textbox(
label="OCR Box (x1,y1,x2,y2)",
placeholder="e.g., 100,100,200,200",
visible=False
)
ocr_color_dropdown = gr.Dropdown(
choices=["red", "green", "blue"],
label="OCR Color",
visible=False
)
render_checkbox = gr.Checkbox(
label="Render Result",
visible=False
)
submit_button = gr.Button("Process")
with gr.Column():
output_text = gr.Textbox(label="OCR Result")
output_html = gr.HTML(label="Rendered HTML Output")
gr.Markdown("""## GOT-OCR 2.0
This compact **580M-parameter** model handles a wide range of text recognition tasks with high accuracy.
### Model Information
- **Model Name**: GOT-OCR 2.0
- **Hugging Face Repository**: [ucaslcl/GOT-OCR2_0](https://huggingface.co./ucaslcl/GOT-OCR2_0)
- **Environment**: CUDA 11.8 + PyTorch 2.0.1
""")
task_dropdown.change(
update_inputs,
inputs=[task_dropdown],
outputs=[ocr_type_dropdown, ocr_box_input, ocr_color_dropdown, render_checkbox]
)
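    # Run OCR on the uploaded image when the user clicks Process.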
submit_button.click(
ocr_demo,
inputs=[image_input, task_dropdown, ocr_type_dropdown, ocr_box_input, ocr_color_dropdown],
outputs=[output_text, output_html]
)
if __name__ == "__main__":
demo.launch() |