Spaces: Running on Zero
ManishThota committed
Commit • d92fc94
1 Parent(s): b9601bc
Update app.py
app.py CHANGED
@@ -6,6 +6,8 @@ import spaces
 import gradio as gr
 import os
 
+zero = torch.Tensor([0]).cuda()
+
 quantization_config = BitsAndBytesConfig(
     load_in_4bit=True,
     bnb_4bit_compute_dtype=torch.float16
@@ -63,7 +65,7 @@ def process_video(video_file, question_parts):
     ]
     prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
     # Prepare inputs for the model
-    input = processor([prompt], videos=[video_clip], padding=True, return_tensors="pt").to(
+    input = processor([prompt], videos=[video_clip], padding=True, return_tensors="pt").to(zero.device)
 
     # Generate output
     generate_kwargs = {"max_new_tokens": 3000, "do_sample": False, "top_p": 0.9}
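For context, the change follows the device-handling pattern used on ZeroGPU Spaces: allocate a small CUDA tensor at import time and move model inputs to that tensor's device inside a GPU-decorated function, instead of hard-coding "cuda". Below is a minimal, self-contained sketch of that pattern, assuming the `spaces` package available on Hugging Face ZeroGPU hardware; the function name `demo_fn` and its toy workload are illustrative assumptions, not taken from app.py.

# Minimal sketch of the ZeroGPU device pattern adopted in this commit.
# Assumes a ZeroGPU Space where the `spaces` package is available.
import spaces
import torch

# Allocated at import time; under ZeroGPU this tensor typically reports 'cpu'
# outside a @spaces.GPU call and 'cuda:0' while one is running, so its
# `.device` attribute is a convenient handle for placing inputs.
zero = torch.Tensor([0]).cuda()

@spaces.GPU
def demo_fn(x: torch.Tensor) -> torch.Tensor:
    # Mirror the commit: route inputs through `.to(zero.device)` rather than
    # a hard-coded "cuda" string, then bring the result back to CPU.
    return (x.to(zero.device) * 2).cpu()

if __name__ == "__main__":
    print(demo_fn(torch.ones(3)))

In app.py the same idea appears as `.to(zero.device)` on the processor output inside `process_video`, which is presumably decorated with `@spaces.GPU` elsewhere in the file (the decorator is not visible in this hunk).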