Ligeng-Zhu committed on
Commit eb202aa · verified · 1 Parent(s): e95854d

Upload files with `vila-upload`.


Upload utils.py
Upload auto_processor.py
Upload README.md
Upload mm_utils.py
Upload modeling_vila.py

Files changed (5)
  1. README.md +6 -5
  2. auto_processor.py +25 -22
  3. mm_utils.py +1 -1
  4. modeling_vila.py +5 -5
  5. utils.py +9 -3
README.md CHANGED
@@ -12,7 +12,8 @@ tags:
 Dependency setups:
 
 ```bash
-pip install transformers==4.46 accelerate opencv-python torchvision einops
+# other transformers version may also work, but we have not tested
+pip install transformers==4.46 accelerate opencv-python torchvision einops pillow
 pip install git+https://github.com/bfshi/scaling_on_scales.git
 ```
 
@@ -24,7 +25,7 @@ from termcolor import colored
 
 model_path = "Efficient-Large-Model/NVILA-Lite-2B-hf-preview"
 
-# you can use config
+# you can use config
 config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
 model = AutoModel.from_config(config, trust_remote_code=True)
 # or directly from_pretrained
@@ -74,13 +75,13 @@ output_ids = model.generate(
     },
     media_config={
         "image": {}
-    },
+    },
     generation_config=model.generation_config,
     max_new_tokens=256,
 )
 print(processor.tokenizer.decode(output_ids[0], skip_special_tokens=True))
 
-##### the above code is equivalent to
+##### the above code is equivalent to
 # response = model.generate_content([
 #     PIL.Image.open("demo_images/demo_img_1.png"),
 #     "describe the image?"
@@ -103,4 +104,4 @@ if osp.isdir(output_dir):
     shutil.rmtree(output_dir)
 from llava.remote_code.modeling_vila import VILAForCasualLM
 VILAForCasualLM.convert_vila_dev_ckpt_to_remote(model_path, output_dir, copy=False)
-```
+```
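For orientation, the README fragments above alternate between building the model from a config object and loading it directly. A minimal sketch of the two variants, assuming only the preview checkpoint name shown above, would look roughly like this:

```python
from transformers import AutoConfig, AutoModel

model_path = "Efficient-Large-Model/NVILA-Lite-2B-hf-preview"

# Variant 1 (the README's "# you can use config"): materialize the config,
# then instantiate the model from it.
config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
model = AutoModel.from_config(config, trust_remote_code=True)

# Variant 2 (the README's "# or directly from_pretrained"): one call.
model = AutoModel.from_pretrained(model_path, trust_remote_code=True)
```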
auto_processor.py CHANGED
@@ -1,8 +1,9 @@
-import os, os.path as osp
+import os
+import os.path as osp
 from collections import defaultdict
 from typing import List, Union
 
-from transformers import AutoModel, AutoTokenizer, AutoConfig, AutoImageProcessor, AutoProcessor
+from transformers import AutoConfig, AutoImageProcessor, AutoModel, AutoProcessor, AutoTokenizer
 from transformers.feature_extraction_utils import BatchFeature
 from transformers.image_utils import ImageInput, VideoInput
 from transformers.processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
@@ -10,9 +11,8 @@ from transformers.tokenization_utils_base import PreTokenizedInput, TextInput
 from transformers.utils import logging
 
 from .constants import DEFAULT_IMAGE_TOKEN, MEDIA_TOKENS
-from .media import Image, Video
+from .media import Image, Video, extract_media
 from .mm_utils import process_image, process_images
-from .media import extract_media
 from .tokenizer_utils import tokenize_conversation
 
 
@@ -41,7 +41,7 @@ class VILAProcessor(ProcessorMixin):
         self.image_processor = image_processor
         self.tokenizer = tokenizer
         super().__init__(image_processor, tokenizer, chat_template=chat_template)
-
+
     @classmethod
     def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
         if os.path.isdir(pretrained_model_name_or_path):
@@ -49,16 +49,23 @@ class VILAProcessor(ProcessorMixin):
         else:
             print(f"pretrained_model_name_or_path {pretrained_model_name_or_path} is not a directory, downloading")
             from huggingface_hub import HfApi, snapshot_download
+
             pretrained_model_name_or_path = snapshot_download(pretrained_model_name_or_path)
 
-        image_processor = AutoImageProcessor.from_pretrained(osp.join(pretrained_model_name_or_path, "vision_tower"), trust_remote_code=True)
-        tokenizer = AutoTokenizer.from_pretrained(osp.join(pretrained_model_name_or_path, "llm"), trust_remote_code=True)
+        image_processor = AutoImageProcessor.from_pretrained(
+            osp.join(pretrained_model_name_or_path, "vision_tower"), trust_remote_code=True
+        )
+        tokenizer = AutoTokenizer.from_pretrained(
+            osp.join(pretrained_model_name_or_path, "llm"), trust_remote_code=True
+        )
         config = AutoConfig.from_pretrained(pretrained_model_name_or_path, trust_remote_code=True)
-
+
         return cls(image_processor=image_processor, tokenizer=tokenizer, config=config)
 
     def __repr__(self):
-        return f"VILAProcessor(image_processor={self.image_processor}, tokenizer={self.tokenizer}, config={self.config})"
+        return (
+            f"VILAProcessor(image_processor={self.image_processor}, tokenizer={self.tokenizer}, config={self.config})"
+        )
 
     def __call__(
         self,
@@ -145,12 +152,9 @@ class VILAProcessor(ProcessorMixin):
     # inputs = processor(conversation=llavaconv, padding=True, return_tensors="pt")
     def apply_chat_template(self, conversation, add_generation_prompt=True, **kwargs):
         vila_conv = []
-
+
         for chat in conversation:
-            vila_chat = {
-                "from": "",
-                "value": []
-            }
+            vila_chat = {"from": "", "value": []}
             if chat["role"] == "user":
                 # user allows to input image and text
                 vila_chat["from"] = "human"
@@ -167,9 +171,10 @@ class VILAProcessor(ProcessorMixin):
                 assert content["type"] == "text", f"Unsupported content type: {content['type']}"
                 vila_chat["value"].append(content["text"])
             vila_conv.append(vila_chat)
-
+
         return self(vila_conv)
 
+
 if __name__ == "__main__":
     # gpt style: user, assistant
     # vila style: human, gpt
@@ -178,8 +183,8 @@ if __name__ == "__main__":
             "role": "user",
             "content": [
                 {"type": "image", "path": "demo_images/demo_img_1.png"},
-                {"type": "text", "text": "Describe this image."}
-            ]
+                {"type": "text", "text": "Describe this image."},
+            ],
         }
     ]
 
@@ -211,7 +216,7 @@ if __name__ == "__main__":
         tokenizer=model.tokenizer,
     )
 
-    # TODO: add padding, return_tensors,
+    # TODO: add padding, return_tensors,
     inputs = processor(conversation=llavaconv, padding=True, return_tensors="pt")
     print(inputs.keys(), inputs.input_ids.shape, [_.shape for _ in inputs.image])
     print("vila conv pass")
@@ -225,10 +230,8 @@ if __name__ == "__main__":
         media={
            "image": inputs.image,
         },
-        media_config={
-            "image": {}
-        },
+        media_config={"image": {}},
         generation_config=model.generation_config,
         max_new_tokens=100,
     )
-    print(output_ids)
+    print(output_ids)
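The `apply_chat_template` hunks above convert GPT-style roles (user/assistant) into VILA-style roles (human/gpt). As a reading aid only, a stripped-down sketch of just that mapping, detached from the processor class and assuming image entries carry a "path" key as in the demo conversation above, is:

```python
def to_vila_conversation(conversation):
    """Illustrative sketch: map GPT-style {"role", "content"} turns to VILA-style
    {"from", "value"} turns. The real apply_chat_template additionally wraps image
    paths in the repo's media types and asserts on unsupported content types."""
    vila_conv = []
    for chat in conversation:
        vila_chat = {"from": "", "value": []}
        if chat["role"] == "user":
            vila_chat["from"] = "human"
        elif chat["role"] == "assistant":
            vila_chat["from"] = "gpt"
        for content in chat["content"]:
            if content["type"] == "image":
                vila_chat["value"].append(content["path"])  # real code builds an Image media object
            else:
                vila_chat["value"].append(content["text"])
        vila_conv.append(vila_chat)
    return vila_conv
```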
mm_utils.py CHANGED
@@ -26,7 +26,7 @@ import torch
 from PIL import Image
 from transformers import StoppingCriteria
 
-from llava.constants import DEFAULT_IMAGE_TOKEN
+from .constants import DEFAULT_IMAGE_TOKEN
 
 
 def get_frame_from_vcap(vidcap, num_frames=10, max_fps=0.0, fps=None, frame_count=None, video_file_name=None):
modeling_vila.py CHANGED
@@ -38,6 +38,7 @@ from transformers import (
 from transformers.modeling_outputs import CausalLMOutputWithPast
 from transformers.modeling_utils import ContextManagers, no_init_weights
 
+from .auto_processor import VILAProcessor
 from .base_projector import MultimodalProjector, MultimodalProjectorConfig
 from .builder import build_llm_and_tokenizer
 from .configuration_vila import VILAConfig
@@ -49,7 +50,7 @@ from .mm_utils import process_image, process_images
 from .siglip_encoder import SiglipVisionTower, SiglipVisionTowerDynamicS2, SiglipVisionTowerS2
 from .tokenizer_utils import tokenize_conversation
 from .utils import get_model_config, load_tokenizer_then_handle_media_tokens_and_chat_template
-from .auto_processor import VILAProcessor
+
 
 # from llava.constants import DEFAULT_IMAGE_TOKEN, IGNORE_INDEX, NUM_EXTRA_TOKENS
 # quick hack for remote code
@@ -230,7 +231,7 @@ class VILAPretrainedModel(PreTrainedModel):
             api = HfApi()
             model_path = snapshot_download(model_path, local_dir=output_dir)
             print("downloading HF model to", model_path)
-
+
         if check_dot_in_model_path(model_path) and output_dir is None:
             raise ValueError(
                 f"Model path {model_path} contains a dot, which will affect the remote code loading. Please specify the output directory without dot in the path to fix this issue."
@@ -280,10 +281,10 @@ class VILAPretrainedModel(PreTrainedModel):
                src_fname = os.path.join(current_folder, file_name)
                dst_fname = os.path.join(output_dir, "README.md")
                if os.path.exists(dst_fname):
-                    old_reamde = open(dst_fname, 'r').read()
+                    old_reamde = open(dst_fname).read()
                else:
                    old_reamde = ""
-                with open(src_fname, 'r') as src, open(dst_fname, 'w') as dst:
+                with open(src_fname) as src, open(dst_fname, "w") as dst:
                    dst.write(src.read())
                    dst.write(old_reamde)
                print("[HF remote code] REAMDE ", src_fname, "to", dst_fname)
@@ -299,7 +300,6 @@ class VILAPretrainedModel(PreTrainedModel):
                os.remove(os.path.join(output_dir, file_name))
                os.symlink(full_file_name, os.path.join(output_dir, file_name))
                print("[HF remote code] linking", full_file_name, "to", output_dir)
-
 
    def save_pretrained(self, output_dir, state_dict=None):
        if state_dict is None:
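The hunks above touch the checkpoint-conversion path (downloading, copying the README, symlinking the remote-code files). The README snippet shown earlier in this commit drives it roughly as follows; the checkpoint and output paths here are illustrative, and an explicit `output_dir` is needed whenever the model path contains a dot, per the `ValueError` above:

```python
import os.path as osp
import shutil

from llava.remote_code.modeling_vila import VILAForCasualLM

model_path = "Efficient-Large-Model/NVILA-Lite-2B"  # illustrative dev checkpoint
output_dir = "NVILA-Lite-2B-hf-preview"             # illustrative; keep dots out of the path

# Remove any stale output before converting, as in the README snippet.
if osp.isdir(output_dir):
    shutil.rmtree(output_dir)
VILAForCasualLM.convert_vila_dev_ckpt_to_remote(model_path, output_dir, copy=False)
```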
utils.py CHANGED
@@ -19,15 +19,20 @@ import os.path as osp
 
 from huggingface_hub import repo_exists, snapshot_download
 from huggingface_hub.utils import HFValidationError, validate_repo_id
-from transformers import AutoConfig, PretrainedConfig, AutoTokenizer
+from transformers import AutoConfig, AutoTokenizer, PretrainedConfig
 
 from .configuration_vila import VILAConfig
 from .constants import MEDIA_TOKENS
 from .tokenizer_utils import infer_stop_tokens
 
-def load_tokenizer_then_handle_media_tokens_and_chat_template(model_name_or_path, config: VILAConfig, model_max_length=None):
+
+def load_tokenizer_then_handle_media_tokens_and_chat_template(
+    model_name_or_path, config: VILAConfig, model_max_length=None
+):
     # TODO(ligeng): a lot of copy-paste code, refactor to make a single function
-    tokenizer = AutoTokenizer.from_pretrained(osp.join(model_name_or_path, "llm"), padding_side="right", use_fast=True, legacy=False)
+    tokenizer = AutoTokenizer.from_pretrained(
+        osp.join(model_name_or_path, "llm"), padding_side="right", use_fast=True, legacy=False
+    )
     if model_max_length is not None:
         tokenizer.model_max_length = model_max_length
 
@@ -54,6 +59,7 @@ def load_tokenizer_then_handle_media_tokens_and_chat_template(model_name_or_path
 
     return tokenizer
 
+
 def get_model_config(config):
     default_keys = ["llm_cfg", "vision_tower_cfg", "mm_projector_cfg"]
 
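The reformatted helper keeps its original signature. A hedged usage sketch, where the local checkpoint path and the `model_max_length` value are illustrative and the directory is assumed to contain the `llm` subfolder implied by the `osp.join` call above:

```python
from transformers import AutoConfig

# Mirrors the import used in modeling_vila.py; run from within the remote-code package.
from .utils import load_tokenizer_then_handle_media_tokens_and_chat_template

model_path = "NVILA-Lite-2B-hf-preview"  # illustrative local checkpoint directory

config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
tokenizer = load_tokenizer_then_handle_media_tokens_and_chat_template(
    model_path, config, model_max_length=4096  # illustrative length cap
)
print(type(tokenizer), tokenizer.model_max_length)
```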