Spaces: Running on Zero

1inkusFace committed: Update app.py

app.py CHANGED
@@ -1,7 +1,7 @@
 import spaces
 import torch
 import os
-from diffusers import AutoencoderKLLTXVideo, LTXImageToVideoPipeline, LTXVideoTransformer3DModel
+from diffusers import AutoencoderKLLTXVideo, LTXImageToVideoPipeline, LTXVideoTransformer3DModel, T5EncoderModel
 from diffusers.utils import export_to_video, load_image #, PIL_INTERPOLATION
 
 import gradio as gr
@@ -16,7 +16,7 @@ torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
 torch.backends.cudnn.allow_tf32 = False
 torch.backends.cudnn.deterministic = False
 torch.backends.cudnn.benchmark = False
-torch.backends.cuda.preferred_blas_library="cublas"
+#torch.backends.cuda.preferred_blas_library="cublas"
 #torch.backends.cuda.preferred_linalg_library="cusolver"
 torch.set_float32_matmul_precision("highest")
 os.putenv("HF_HUB_ENABLE_HF_TRANSFER","1")
@@ -30,8 +30,17 @@ single_file_url = "https://huggingface.co/Lightricks/LTX-Video/ltx-video-2b-v0.9
 transformer = LTXVideoTransformer3DModel.from_single_file(single_file_url,token=HF_TOKEN)
 
 #vae = AutoencoderKLLTXVideo.from_single_file(vae_url,token=HF_TOKEN)
+#vaeX = AutoencoderKLLTXVideo.from_pretrained("Lightricks/LTX-Video",subfolder='vae',token=HF_TOKEN)
 
-pipe = LTXImageToVideoPipeline.from_pretrained(
+pipe = LTXImageToVideoPipeline.from_pretrained(
+    "Lightricks/LTX-Video",
+    token=HF_TOKEN,
+    transformer=transformer,
+    text_encoder=None,
+    token=True
+).to(torch.device("cuda"),torch.bfloat16)
+
+text_encoder = T5EncoderModel.from_pretrained("Lightricks/LTX-Video",subfolder='text_encoder',token=True).to(torch.device("cuda"),torch.bfloat16)
 
 @spaces.GPU(duration=80)
 def generate_video(
@@ -46,6 +55,7 @@ def generate_video(
     fps,
     progress=gr.Progress(track_tqdm=True)
 ):
+    pipe.text_encoder=text_encoder
     seed=random.randint(0, MAX_SEED)
     generator = torch.Generator(device="cuda").manual_seed(seed)
     image = Image.open(image_url).convert("RGB")
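For readers skimming the diff: the pattern this commit moves to is to build the LTX-Video image-to-video pipeline without a text encoder, load the T5 text encoder as a separate bfloat16 module, and attach it inside the GPU-scoped handler. Below is a minimal sketch of that loading section, not the Space's full app.py: HF_TOKEN and the single-file checkpoint URL are placeholders (the URL is truncated in the diff header), the generate_video signature is abridged, T5EncoderModel is imported from transformers here, and the duplicate token keyword in the committed from_pretrained call, which Python would reject as a SyntaxError, is dropped.

import os

import spaces
import torch
from diffusers import LTXImageToVideoPipeline, LTXVideoTransformer3DModel
from transformers import T5EncoderModel

# Placeholders; the real app.py defines these near the top of the file.
HF_TOKEN = os.getenv("HF_TOKEN")
single_file_url = "https://huggingface.co/Lightricks/LTX-Video/..."  # truncated in the diff header

# Transformer weights come from a single checkpoint file.
transformer = LTXVideoTransformer3DModel.from_single_file(single_file_url, token=HF_TOKEN)

# Build the pipeline without a text encoder, then load the T5 encoder
# separately; both end up in bfloat16 on the GPU.
pipe = LTXImageToVideoPipeline.from_pretrained(
    "Lightricks/LTX-Video",
    token=HF_TOKEN,
    transformer=transformer,
    text_encoder=None,
).to(torch.device("cuda"), torch.bfloat16)

text_encoder = T5EncoderModel.from_pretrained(
    "Lightricks/LTX-Video", subfolder="text_encoder", token=HF_TOKEN
).to(torch.device("cuda"), torch.bfloat16)

@spaces.GPU(duration=80)
def generate_video(image_url, fps, progress=None):  # abridged signature
    # Attach the separately loaded encoder before the pipeline encodes the prompt.
    pipe.text_encoder = text_encoder
    ...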