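# Gradio Space: run Chan-Y/Florence-2-LaTex-v2 (a Florence-2 fine-tune) on an
# uploaded image and display the generated LaTeX text.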
# Install flash-attn at startup, skipping its CUDA build (a common workaround on hosted Spaces).
import subprocess
subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
import gradio as gr
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the fine-tuned Florence-2 model and its processor (Florence-2 ships custom code, so trust_remote_code is required).
model_name = "Chan-Y/Florence-2-LaTex-v2"
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True).to(device)
processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True)
def process_image(image):
    """Run the model on a PIL image and return the decoded text."""
    if image is not None:
        inputs = processor(images=image, return_tensors="pt").to(device)
        outputs = model.generate(**inputs)
        text = processor.decode(outputs[0], skip_special_tokens=True)
        return text
    return "No image provided."
# Minimal UI: an image upload, a button to trigger inference, and a textbox for the output.
with gr.Blocks() as demo:
    with gr.Row():
        image_input = gr.Image(label="Upload an Image", type="pil")
        process_button = gr.Button("Process Image")
    output = gr.Textbox(label="Output Message")
    process_button.click(process_image, inputs=image_input, outputs=output)

demo.launch()