ZhangYuanhan committed on
Commit 3c8d458
1 Parent(s): bf1fb81

Update README.md

Files changed (1)
  1. README.md +8 -9
README.md CHANGED
@@ -144,7 +144,9 @@ This model supports at most 64 frames.
 
 ### Intended use
 
-The model was trained on [LLaVA-Video-178K](https://huggingface.co/datasets/lmms-lab/LLaVA-NeXT-Video-SFT-Data) and [LLaVA-OneVision Dataset](https://huggingface.co/datasets/lmms-lab/LLaVA-OneVision-Data), having have the ability to interact with images, multi-image and videos, but specific to videos.
+The model was trained on [LLaVA-Video-178K](https://huggingface.co/datasets/lmms-lab/LLaVA-NeXT-Video-SFT-Data) and the [LLaVA-OneVision Dataset](https://huggingface.co/datasets/lmms-lab/LLaVA-OneVision-Data); it can interact with images, multi-image inputs, and videos, with a particular focus on videos.
+
+
 
 **Feel free to share your generations in the Community tab!**
 
@@ -166,9 +168,7 @@ import sys
 import warnings
 from decord import VideoReader, cpu
 import numpy as np
-
 warnings.filterwarnings("ignore")
-
 def load_video(video_path, max_frames_num, fps=1, force_sample=False):
     if max_frames_num == 0:
         return np.zeros((1, 336, 336, 3))
@@ -186,14 +186,12 @@ def load_video(video_path, max_frames_num, fps=1, force_sample=False):
     frame_time = ",".join([f"{i:.2f}s" for i in frame_time])
     spare_frames = vr.get_batch(frame_idx).asnumpy()
     # import pdb;pdb.set_trace()
-
     return spare_frames, frame_time, video_time
-
 pretrained = "lmms-lab/LLaVA-NeXT-Video-72B-Qwen2"
 model_name = "llava_qwen"
 device = "cuda"
 device_map = "auto"
-tokenizer, model, image_processor, max_length = load_pretrained_model(pretrained, None, model_name, device_map=device_map)  # Add any other arguments you want to pass in llava_model_args
+tokenizer, model, image_processor, max_length = load_pretrained_model(pretrained, None, model_name, torch_dtype="bfloat16", device_map=device_map)  # Add any other arguments you want to pass in llava_model_args
 model.eval()
 video_path = "XXXX"
 max_frames_num = 64
@@ -201,7 +199,8 @@ video,frame_time,video_time = load_video(video_path, max_frames_num, 1, force_sa
 video = image_processor.preprocess(video, return_tensors="pt")["pixel_values"].cuda().bfloat16()
 video = [video]
 conv_template = "qwen_1_5"  # Make sure you use the correct chat template for different models
-question = DEFAULT_IMAGE_TOKEN + "\nPlease describe this video in detail."
+time_instruction = f"The video lasts for {video_time:.2f} seconds, and {len(video[0])} frames are uniformly sampled from it. These frames are located at {frame_time}. Please answer the following questions related to this video."
+question = DEFAULT_IMAGE_TOKEN + f"{time_instruction}\nPlease describe this video in detail."
 conv = copy.deepcopy(conv_templates[conv_template])
 conv.append_message(conv.roles[0], question)
 conv.append_message(conv.roles[1], None)
@@ -210,12 +209,12 @@ input_ids = tokenizer_image_token(prompt_question, tokenizer, IMAGE_TOKEN_INDEX,
 cont = model.generate(
     input_ids,
     images=video,
-    modalities=["video"],
+    modalities= ["video"],
     do_sample=False,
     temperature=0,
     max_new_tokens=4096,
 )
-text_outputs = tokenizer.batch_decode(cont, skip_special_tokens=True)
+text_outputs = tokenizer.batch_decode(cont, skip_special_tokens=True)[0].strip()
 print(text_outputs)
 ```
 
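For quick reference, the snippet below sketches what the prompt-related change in this commit amounts to end to end. It is illustrative only and not part of the commit: `sample_frame_times` and the dummy frame/fps numbers are hypothetical stand-ins for the parts the hunks above do not show (the body of `load_video` and a real video file), and `DEFAULT_IMAGE_TOKEN` is assumed to be the `"<image>"` token normally imported from `llava.constants`. Only the `time_instruction`/`question` assembly mirrors the lines added above.

```python
import numpy as np

DEFAULT_IMAGE_TOKEN = "<image>"  # assumed value; normally imported from llava.constants

def sample_frame_times(total_frame_num, avg_fps, max_frames_num=64):
    """Hypothetical helper mirroring load_video's return values:
    uniformly spaced frame indices, their timestamps, and the video length."""
    video_time = total_frame_num / avg_fps
    num_frames = min(max_frames_num, total_frame_num)
    frame_idx = np.linspace(0, total_frame_num - 1, num_frames, dtype=int).tolist()
    frame_time = ",".join([f"{i / avg_fps:.2f}s" for i in frame_idx])
    return frame_idx, frame_time, video_time

# Dummy video: 900 frames at 30 fps (30 seconds); the README reads a real file with decord.
frame_idx, frame_time, video_time = sample_frame_times(900, 30, max_frames_num=64)

# The timing prompt added by this commit, followed by the original question.
time_instruction = (
    f"The video lasts for {video_time:.2f} seconds, and {len(frame_idx)} frames "
    f"are uniformly sampled from it. These frames are located at {frame_time}. "
    f"Please answer the following questions related to this video."
)
question = DEFAULT_IMAGE_TOKEN + f"{time_instruction}\nPlease describe this video in detail."
print(question)
```

Prepending the duration and sampled-frame timestamps this way gives the model explicit timing context for the uniformly sampled frames before the actual question is asked.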