pcuenq committed
Commit 19722f5 • 1 Parent(s): 967ea4d

Update: transformers 4.35, examples

.gitattributes CHANGED
@@ -37,3 +37,6 @@ assets/captioning_example_2.png filter=lfs diff=lfs merge=lfs -text
 assets/vqa_example_1.png filter=lfs diff=lfs merge=lfs -text
 assets/vqa_example_2.png filter=lfs diff=lfs merge=lfs -text
 assets/docvqa_example.png filter=lfs diff=lfs merge=lfs -text
+assets/food.png filter=lfs diff=lfs merge=lfs -text
+assets/girl_hat.png filter=lfs diff=lfs merge=lfs -text
+assets/jobs.png filter=lfs diff=lfs merge=lfs -text
app.py CHANGED
@@ -2,15 +2,13 @@ import gradio as gr
 import re
 import torch
 from PIL import Image
-from transformers import AutoTokenizer, FuyuForCausalLM, FuyuImageProcessor, FuyuProcessor
+from transformers import FuyuForCausalLM, FuyuProcessor

 model_id = "adept/fuyu-8b"
 dtype = torch.bfloat16
-device = "cuda"

-tokenizer = AutoTokenizer.from_pretrained(model_id)
-model = FuyuForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=dtype)
-processor = FuyuProcessor(image_processor=FuyuImageProcessor(), tokenizer=tokenizer)
+model = FuyuForCausalLM.from_pretrained(model_id, device_map="cuda", torch_dtype=dtype)
+processor = FuyuProcessor.from_pretrained(model_id)

 CAPTION_PROMPT = "Generate a coco-style caption.\n"
 DETAILED_CAPTION_PROMPT = "What is happening in this image?"
@@ -38,12 +36,11 @@ def pad_to_size(image, canvas_width=1920, canvas_height=1080):

 def predict(image, prompt):
     # image = image.convert('RGB')
-    model_inputs = processor(text=prompt, images=[image])
-    model_inputs = {k: v.to(dtype=dtype if torch.is_floating_point(v) else v.dtype, device=device) for k,v in model_inputs.items()}
+    model_inputs = processor(text=prompt, images=[image]).to(device=model.device)

     generation_output = model.generate(**model_inputs, max_new_tokens=50)
     prompt_len = model_inputs["input_ids"].shape[-1]
-    return tokenizer.decode(generation_output[0][prompt_len:], skip_special_tokens=True)
+    return processor.decode(generation_output[0][prompt_len:], skip_special_tokens=True)

 def caption(image, detailed_captioning):
     if detailed_captioning:
@@ -55,43 +52,6 @@ def caption(image, detailed_captioning):
 def set_example_image(example: list) -> dict:
     return gr.Image.update(value=example[0])

-def scale_factor_to_fit(original_size, target_size=(1920, 1080)):
-    width, height = original_size
-    max_width, max_height = target_size
-    if width <= max_width and height <= max_height:
-        return 1.0
-    return min(max_width/width, max_height/height)
-
-def tokens_to_box(tokens, original_size):
-    bbox_start = tokenizer.convert_tokens_to_ids("<0x00>")
-    bbox_end = tokenizer.convert_tokens_to_ids("<0x01>")
-    try:
-        # Assumes a single box
-        bbox_start_pos = (tokens == bbox_start).nonzero(as_tuple=True)[0].item()
-        bbox_end_pos = (tokens == bbox_end).nonzero(as_tuple=True)[0].item()
-
-        if bbox_end_pos != bbox_start_pos + 5:
-            return tokens
-
-        # Retrieve transformed coordinates from tokens
-        coords = tokenizer.convert_ids_to_tokens(tokens[bbox_start_pos+1:bbox_end_pos])
-
-        # Scale back to original image size and multiply by 2
-        scale = scale_factor_to_fit(original_size)
-        top, left, bottom, right = [2 * int(float(c)/scale) for c in coords]
-
-        # Replace the IDs so they get detokenized right
-        replacement = f" <box>{top}, {left}, {bottom}, {right}</box>"
-        replacement = tokenizer.tokenize(replacement)[1:]
-        replacement = tokenizer.convert_tokens_to_ids(replacement)
-        replacement = torch.tensor(replacement).to(tokens)
-
-        tokens = torch.cat([tokens[:bbox_start_pos], replacement, tokens[bbox_end_pos+1:]], 0)
-        return tokens
-    except:
-        gr.Error("Can't convert tokens.")
-        return tokens
-
 def coords_from_response(response):
     # y1, x1, y2, x2
     pattern = r"<box>(\d+),\s*(\d+),\s*(\d+),\s*(\d+)</box>"
@@ -111,14 +71,12 @@ def localize(image, query):
     padded = resize_to_max(image)
     padded = pad_to_size(padded)

-    model_inputs = processor(text=prompt, images=[padded])
-    model_inputs = {k: v.to(dtype=dtype if torch.is_floating_point(v) else v.dtype, device=device) for k,v in model_inputs.items()}
+    model_inputs = processor(text=prompt, images=[padded]).to(device=model.device)

-    generation_output = model.generate(**model_inputs, max_new_tokens=40)
-    prompt_len = model_inputs["input_ids"].shape[-1]
-    tokens = generation_output[0][prompt_len:]
-    tokens = tokens_to_box(tokens, image.size)
-    decoded = tokenizer.decode(tokens, skip_special_tokens=True)
+    outputs = model.generate(**model_inputs, max_new_tokens=40)
+    post_processed_bbox_tokens = processor.post_process_box_coordinates(outputs)[0]
+    decoded = processor.decode(post_processed_bbox_tokens, skip_special_tokens=True)
+    decoded = decoded.split('\x04', 1)[1] if '\x04' in decoded else ''
     coords = coords_from_response(decoded)
     return image, [(coords, f"Location of \"{query}\"")]

@@ -145,6 +103,13 @@ with gr.Blocks(css=css) as demo:
         """
     )
    with gr.Tab("Visual Question Answering"):
+        gr.Markdown(
+            """
+            You can use natural-language questions to ask about the image. However, since this is a base model not fine-tuned for \
+            chat instructions, you may get better results by following a prompt format similar to the one used during training. See the \
+            examples below for details!
+            """
+        )
        with gr.Row():
            with gr.Column():
                image_input = gr.Image(label="Upload your Image", type="pil")
@@ -154,8 +119,14 @@ with gr.Blocks(css=css) as demo:
        vqa_btn = gr.Button("Answer Visual Question")

        gr.Examples(
-            [["assets/vqa_example_1.png", "How is this made?"], ["assets/vqa_example_2.png", "What is this flower and where is it's origin?"],
-            ["assets/docvqa_example.png", "How many items are sold?"], ["assets/screen2words_ui_example.png", "What is this app about?"]],
+            [
+                ["assets/vqa_example_1.png", "What's the name of this dessert, and how is it made?\n"],
+                ["assets/vqa_example_2.png", "What is this flower and where is it's origin?"],
+                ["assets/food.png", "Answer the following VQAv2 question based on the image.\nWhat type of foods are in the image?"],
+                ["assets/jobs.png", "Answer the following DocVQA question based on the image.\nWhich is the metro in California that has a good job Outlook?"],
+                ["assets/docvqa_example.png", "How many items are sold?"],
+                ["assets/screen2words_ui_example.png", "What is this app about?"],
+            ],
            inputs = [image_input, text_input],
            outputs = [vqa_output],
            fn=predict,
@@ -173,7 +144,7 @@ with gr.Blocks(css=css) as demo:
        captioning_btn = gr.Button("Generate Caption")

        gr.Examples(
-            [["assets/captioning_example_1.png", False], ["assets/captioning_example_2.png", True]],
+            [["assets/captioning_example_1.png", False], ["assets/girl_hat.png", True]],
            inputs = [captioning_input, detailed_captioning_checkbox],
            outputs = [captioning_output],
            fn=caption,
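The main app.py change: instead of wiring an AutoTokenizer and a FuyuImageProcessor together by hand and casting every tensor to the right dtype and device, the app now loads everything through FuyuProcessor.from_pretrained and moves the processor output to model.device in one call. Below is a minimal sketch of the new predict() path, not the full app; it assumes transformers>=4.35, a CUDA device, and one of the example assets from this repo, with the prompt taken from the VQA examples above.

# Sketch of the updated inference path (assumptions: transformers>=4.35, CUDA, repo assets).
import torch
from PIL import Image
from transformers import FuyuForCausalLM, FuyuProcessor

model_id = "adept/fuyu-8b"
model = FuyuForCausalLM.from_pretrained(model_id, device_map="cuda", torch_dtype=torch.bfloat16)
processor = FuyuProcessor.from_pretrained(model_id)  # bundles tokenizer + image processor

image = Image.open("assets/vqa_example_1.png").convert("RGB")
prompt = "What's the name of this dessert, and how is it made?\n"

# The processor output is a BatchFeature; a single .to() moves every tensor to the
# model device, replacing the old per-tensor dtype/device dictionary comprehension.
model_inputs = processor(text=prompt, images=[image]).to(device=model.device)

generation_output = model.generate(**model_inputs, max_new_tokens=50)
prompt_len = model_inputs["input_ids"].shape[-1]
print(processor.decode(generation_output[0][prompt_len:], skip_special_tokens=True))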
 
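The deleted scale_factor_to_fit() and tokens_to_box() helpers are no longer needed: FuyuProcessor.post_process_box_coordinates(), added in transformers 4.35, rewrites the model's raw box tokens into <box>y1, x1, y2, x2</box> text that coords_from_response() can parse. A sketch of the updated localize() flow under the same assumptions as above; the app's real bounding-box prompt and its resize/pad helpers are outside the hunks shown in this diff, so placeholders stand in for them.

# Sketch of the new localization path; `model` and `processor` come from the previous sketch.
import re
from PIL import Image

# Placeholders: the app builds `prompt` from a bounding-box instruction defined elsewhere
# in app.py, and first pads the image to a 1920x1080 canvas (helpers not shown here).
query = "the flower"
prompt = f"When provided with text, generate the corresponding bounding box.\n{query}"
image = Image.open("assets/vqa_example_2.png").convert("RGB")

model_inputs = processor(text=prompt, images=[image]).to(device=model.device)
outputs = model.generate(**model_inputs, max_new_tokens=40)

# post_process_box_coordinates() turns the raw box tokens into "<box>y1, x1, y2, x2</box>"
# text, replacing the manual token surgery that tokens_to_box() used to do.
post_processed_bbox_tokens = processor.post_process_box_coordinates(outputs)[0]
decoded = processor.decode(post_processed_bbox_tokens, skip_special_tokens=True)

# Fuyu separates the prompt from the answer with a \x04 character; keep only the answer.
decoded = decoded.split("\x04", 1)[1] if "\x04" in decoded else ""

# Same pattern the app's coords_from_response() uses; the order is y1, x1, y2, x2.
match = re.search(r"<box>(\d+),\s*(\d+),\s*(\d+),\s*(\d+)</box>", decoded)
if match:
    y1, x1, y2, x2 = map(int, match.groups())
    print("Bounding box (x1, y1, x2, y2):", (x1, y1, x2, y2))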
assets/{captioning_example_2.png → food.png} RENAMED
File without changes
assets/girl_hat.png ADDED

Git LFS Details

  • SHA256: 3248af441169e4ba93ff571364877041c9d4307e6718d487f62ffe8185f539f2
  • Pointer size: 132 Bytes
  • Size of remote file: 1.2 MB
assets/jobs.png ADDED

Git LFS Details

  • SHA256: abceb259fd35ffe157443136df055c348023b499d83a230080b42add7e50520e
  • Pointer size: 132 Bytes
  • Size of remote file: 2.62 MB
requirements.txt CHANGED
@@ -1,3 +1,3 @@
-git+https://github.com/huggingface/transformers.git
+transformers>=4.35.0
 accelerate
 torch==2.0.1
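The git install of transformers is no longer required: the Fuyu processor API this Space now relies on (including post_process_box_coordinates) shipped in the 4.35.0 release. An optional sanity check, a sketch and not part of the Space itself, to confirm the installed version is new enough:

# Optional environment check: confirm the installed transformers release provides
# the Fuyu API that app.py now depends on.
from packaging import version  # installed as a transformers dependency
import transformers

assert version.parse(transformers.__version__) >= version.parse("4.35.0"), (
    f"transformers {transformers.__version__} is older than 4.35.0; "
    "FuyuProcessor.from_pretrained and post_process_box_coordinates need 4.35+."
)

from transformers import FuyuForCausalLM, FuyuProcessor  # import succeeds on 4.35+
print("Fuyu classes available:", FuyuForCausalLM.__name__, FuyuProcessor.__name__)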