Fabrice-TIERCELIN
committed on
Download text encoder
Browse files
app.py
CHANGED
@@ -9,6 +9,7 @@ import spaces
|
|
9 |
import torch
|
10 |
|
11 |
from hyvideo.utils.file_utils import save_videos_grid
|
|
|
12 |
from hyvideo.config import parse_args
|
13 |
from hyvideo.inference import HunyuanVideoSampler
|
14 |
from hyvideo.constants import NEGATIVE_PROMPT
|
@@ -17,6 +18,9 @@ from huggingface_hub import snapshot_download
|
|
17 |
|
18 |
if torch.cuda.device_count() > 0:
|
19 |
snapshot_download(repo_id="tencent/HunyuanVideo", repo_type="model", local_dir="ckpts", force_download=True)
|
|
|
|
|
|
|
20 |
|
21 |
def initialize_model(model_path):
|
22 |
print('initialize_model: ' + model_path)
|
|
|
9 |
import torch
|
10 |
|
11 |
from hyvideo.utils.file_utils import save_videos_grid
|
12 |
+
from hyvideo.utils.preprocess_text_encoder_tokenizer_utils import preprocess_text_encoder_tokenizer
|
13 |
from hyvideo.config import parse_args
|
14 |
from hyvideo.inference import HunyuanVideoSampler
|
15 |
from hyvideo.constants import NEGATIVE_PROMPT
|
|
|
18 |
|
19 |
if torch.cuda.device_count() > 0:
|
20 |
snapshot_download(repo_id="tencent/HunyuanVideo", repo_type="model", local_dir="ckpts", force_download=True)
|
21 |
+
snapshot_download(repo_id="xtuner/llava-llama-3-8b-v1_1-transformers", repo_type="model", local_dir="ckpts/llava-llama-3-8b-v1_1-transformers", force_download=True)
|
22 |
+
preprocess_text_encoder_tokenizer(input_dir = "ckpts/llava-llama-3-8b-v1_1-transformers", output_dir = "ckpts/text_encoder")
|
23 |
+
snapshot_download(repo_id="openai/clip-vit-large-patch14", repo_type="model", local_dir="ckpts/text_encoder_2", force_download=True)
|
24 |
|
25 |
def initialize_model(model_path):
|
26 |
print('initialize_model: ' + model_path)
|