import subprocess

# Install flash-attn at runtime; FLASH_ATTENTION_SKIP_CUDA_BUILD skips compiling the CUDA kernels during installation.
subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
import gradio as gr
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor
import torch

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Florence-2 checkpoint fine-tuned for LaTeX; trust_remote_code is required because
# the model and processor classes ship with the repository, not with transformers.
model_name = "Chan-Y/Florence-2-LaTex-v2"
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True).to(device)
processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True)
def process_image(image):
    """Run the model on a PIL image and return the decoded text."""
    if image is None:
        return "No image provided."
    inputs = processor(images=image, return_tensors="pt").to(device)
    outputs = model.generate(**inputs)
    return processor.decode(outputs[0], skip_special_tokens=True)
# Minimal Gradio UI: upload an image, click the button, read the decoded text.
with gr.Blocks() as demo:
    with gr.Row():
        image_input = gr.Image(label="Upload an Image", type="pil")
        process_button = gr.Button("Process Image")
    output = gr.Textbox(label="Output Message")
    process_button.click(process_image, inputs=image_input, outputs=output)

demo.launch()