Update README.md
Browse files
README.md
CHANGED
@@ -45,6 +45,7 @@ library_name: transformers
45 | To use this model, ensure you have `transformers >= 4.45.0` installed.
46 |
47 | ```python
48 | import requests
49 | from PIL import Image
50 | from transformers import LlavaOnevisionForConditionalGeneration, AutoProcessor
@@ -76,7 +77,7 @@ prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
76 | EOS_TOKEN = "<|im_end|>"
77 | image_file = "http://images.cocodataset.org/val2017/000000039769.jpg"
78 | raw_image = Image.open(requests.get(image_file, stream=True).raw)
79 | - inputs = processor(images=raw_image, text=prompt, return_tensors='pt').to(device)
80 |
81 | output = model.generate(**inputs, max_new_tokens=1024, do_sample=False)
82 | output = processor.decode(output[0][inputs.input_ids.shape[1]:])
|
45 | To use this model, ensure you have `transformers >= 4.45.0` installed.
46 |
47 | ```python
48 | + import torch
49 | import requests
50 | from PIL import Image
51 | from transformers import LlavaOnevisionForConditionalGeneration, AutoProcessor
77 | EOS_TOKEN = "<|im_end|>"
78 | image_file = "http://images.cocodataset.org/val2017/000000039769.jpg"
79 | raw_image = Image.open(requests.get(image_file, stream=True).raw)
80 | + inputs = processor(images=raw_image, text=prompt, return_tensors='pt').to(device, torch.float16)
81 |
82 | output = model.generate(**inputs, max_new_tokens=1024, do_sample=False)
83 | output = processor.decode(output[0][inputs.input_ids.shape[1]:])