from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
from PIL import Image
import torch
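
# Select the device: use the GPU when requested and available, otherwise fall back to CPU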
USE_GPU = True
device = torch.device("cuda" if USE_GPU and torch.cuda.is_available() else "cpu")
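
# The Molmo processor handles both image preprocessing and tokenization (loaded via remote code)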
processor = AutoProcessor.from_pretrained(
    'allenai/MolmoE-1B-0924',
    trust_remote_code=True,
    torch_dtype='auto',
    device_map='auto' if USE_GPU else None,
    cache_dir="./models/molmo1"
)
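
# Load the model directly with from_pretrained. The original wrapped this call in
# accelerate's init_empty_weights(), which is intended for from_config(); combining
# it with from_pretrained() can leave tensors on the meta device and is a likely
# source of the Space's runtime error. from_pretrained() also ties weights itself,
# so the manual tie_weights() workaround is dropped.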
model = AutoModelForCausalLM.from_pretrained(
    'allenai/MolmoE-1B-0924',
    trust_remote_code=True,
    torch_dtype='auto',
    device_map='auto' if USE_GPU else None,
    cache_dir="./models/molmo1",
    attn_implementation="eager"
)
if not USE_GPU:
    model.to(device)
image_path = "./public/image.jpg"  # Replace with your image file path
image = Image.open(image_path).convert("RGB")
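
# Preprocess the image and prompt into model-ready tensors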
inputs = processor.process(
    images=[image],
    text="Extract text"
)
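
# Move each tensor to the model's device and add a batch dimension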
inputs = {k: v.to(model.device).unsqueeze(0) for k, v in inputs.items()}
print('STARTED')
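
# Generate up to 2000 new tokens; stop_strings needs the tokenizer passed
# alongside so decoding can halt at the <|endoftext|> marker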
output = model.generate_from_batch(
    inputs,
    GenerationConfig(
        max_new_tokens=2000,
        # temperature=0.1,
        # top_p=top_p,
        stop_strings="<|endoftext|>"
    ),
    tokenizer=processor.tokenizer
)
# Only get generated tokens; decode them to text
generated_tokens = output[0, inputs['input_ids'].size(1):]
generated_text = processor.tokenizer.decode(generated_tokens, skip_special_tokens=True)
print(generated_text)