Update app.py
app.py CHANGED

@@ -8,11 +8,7 @@ import torch
 from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
 from diffusers.utils import export_to_video
 
-
-pipe_xl.vae.enable_slicing()
-pipe_xl.scheduler = DPMSolverMultistepScheduler.from_config(pipe_xl.scheduler.config)
-pipe_xl.enable_model_cpu_offload()
-pipe_xl.to("cuda")
+
 
 def convert_mp4_to_frames(video_path, duration=3):
     # Read the video file
@@ -58,6 +54,12 @@ def infer(prompt, video_in, denoise_strength):
 
     video = convert_mp4_to_frames(video_in, duration=3)
     video_resized = [Image.fromarray(frame).resize((1024, 576)) for frame in video]
+
+    pipe_xl = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16, revision="refs/pr/17")
+    pipe_xl.vae.enable_slicing()
+    pipe_xl.scheduler = DPMSolverMultistepScheduler.from_config(pipe_xl.scheduler.config)
+    pipe_xl.enable_model_cpu_offload()
+    pipe_xl.to("cuda")
     video_frames = pipe_xl(prompt, negative_prompt=negative_prompt, video=video_resized, strength=denoise_strength).frames
     del pipe_xl
     torch.cuda.empty_cache()
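Taken together, the two hunks move the zeroscope_v2_XL pipeline setup out of module scope and into infer(), so the upscaling pipeline is only instantiated when a request actually runs and is deleted (followed by torch.cuda.empty_cache()) once it finishes. Below is a minimal sketch, assembled from the diff above, of how the affected part of app.py plausibly reads after this commit; the PIL import, the negative_prompt placeholder, the convert_mp4_to_frames stub, and the trailing return are assumptions, since they fall outside the hunks.

# Sketch reconstructed from the diff; not the full app.py.
import torch
from PIL import Image  # assumed import; the diff uses Image.fromarray
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video

negative_prompt = ""  # placeholder; the real value is defined elsewhere in app.py


def convert_mp4_to_frames(video_path, duration=3):
    # Defined earlier in app.py ("Read the video file"); body not shown in this diff.
    raise NotImplementedError


def infer(prompt, video_in, denoise_strength):
    # Decode the first seconds of the input clip and resize frames to 1024x576.
    video = convert_mp4_to_frames(video_in, duration=3)
    video_resized = [Image.fromarray(frame).resize((1024, 576)) for frame in video]

    # Build the XL upscaling pipeline on demand instead of at import time.
    pipe_xl = DiffusionPipeline.from_pretrained(
        "cerspense/zeroscope_v2_XL", torch_dtype=torch.float16, revision="refs/pr/17"
    )
    pipe_xl.vae.enable_slicing()
    pipe_xl.scheduler = DPMSolverMultistepScheduler.from_config(pipe_xl.scheduler.config)
    pipe_xl.enable_model_cpu_offload()
    pipe_xl.to("cuda")

    # Upscale / refine the resized frames with the given denoise strength.
    video_frames = pipe_xl(
        prompt, negative_prompt=negative_prompt, video=video_resized, strength=denoise_strength
    ).frames

    # Release GPU memory once the upscale pass is done.
    del pipe_xl
    torch.cuda.empty_cache()

    # The rest of infer() (e.g. export_to_video(video_frames, ...)) lies outside the diff hunks.
    return video_frames

One note on the committed code: enable_model_cpu_offload() normally manages device placement on its own, so the following pipe_xl.to("cuda") is likely redundant; it is kept in the sketch only because the diff includes it.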