Update app.py
app.py CHANGED
@@ -6,7 +6,7 @@ from omegaconf import OmegaConf
 import os
 from models import get_models
 from diffusers.utils.import_utils import is_xformers_available
-from
+from vlogger.STEB.model_transform import tca_transform_model, ip_scale_set, ip_transform_model
 from diffusers.models import AutoencoderKL
 from models.clip import TextEmbedder
 from datasets import video_transforms
@@ -17,7 +17,6 @@ from einops import rearrange
 import torchvision
 import sys
 from PIL import Image
-from ip_adapter.ip_adapter_transform import ip_scale_set, ip_transform_model
 from transformers import CLIPVisionModelWithProjection, CLIPImageProcessor
 from transformers.image_transforms import convert_to_rgb
 try:
@@ -26,12 +25,7 @@ try:
     from diffusion import create_diffusion
     from download import find_model
 except:
-    # sys.path.append(os.getcwd())
     sys.path.append(os.path.split(sys.path[0])[0])
-    # Code explanation:
-    # sys.path[0]: gives C:\Users\maxu\Desktop\blog_test\pakage2
-    # os.path.split(sys.path[0]): gives ['C:\Users\maxu\Desktop\blog_test', 'pakage2']
-    # cross-package imports inside mmcls work because mmcls is installed
 
 import utils
 
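The removed comments (translated above from Chinese) documented why the except branch works: sys.path[0] is the directory of the running script, so os.path.split(sys.path[0])[0] is its parent, and appending the parent makes sibling packages such as diffusion importable. A minimal sketch of the same pattern, using ImportError instead of the bare except in app.py:

```python
import os
import sys

try:
    from diffusion import create_diffusion
except ImportError:
    # sys.path[0] is the directory containing the running script;
    # its parent is the repo root where the `diffusion` package lives.
    sys.path.append(os.path.split(sys.path[0])[0])
    from diffusion import create_diffusion
```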
@@ -176,7 +170,7 @@ def init_model():
     global clip_image_processor
     print('Initializing ShowMaker', flush=True)
     parser = argparse.ArgumentParser()
-    parser.add_argument("--config", type=str, default="./configs/
+    parser.add_argument("--config", type=str, default="./configs/with_mask_ref_sample.yaml")
     args = parser.parse_args()
     args = OmegaConf.load(args.config)
     device = "cuda" if torch.cuda.is_available() else "cpu"
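Note that args = OmegaConf.load(args.config) replaces the argparse namespace entirely, so every later args.<key> lookup resolves against the YAML file rather than command-line flags. A toy illustration of that attribute-style access (the keys below are hypothetical; the real ones come from with_mask_ref_sample.yaml):

```python
from omegaconf import OmegaConf

# OmegaConf configs are DictConfig objects with attribute access,
# which lets them stand in for an argparse.Namespace.
cfg = OmegaConf.create({"ckpt": "./ckpts/model.pt", "num_frames": 16})  # hypothetical keys
print(cfg.ckpt, cfg.num_frames)  # -> ./ckpts/model.pt 16
```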
@@ -368,6 +362,6 @@ with gr.Blocks() as demo:
     clear = gr.Button("Restart")
     run.click(gen_or_pre, [text_input, image_input, scfg_scale, tcfg_scale, img_cfg_scale, preframe_input, diffusion_step], [output_video])
 
-
+demo.launch(share=True, enable_queue=True)
 
-demo.launch(server_name="0.0.0.0", server_port=10034, enable_queue=True)
+# demo.launch(server_name="0.0.0.0", server_port=10034, enable_queue=True)
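One caveat on the new launch call: enable_queue is a Gradio 3 flag that was removed in Gradio 4, where queueing is enabled on the Blocks object instead. A hedged sketch of the equivalent on a current Gradio release (the UI body is a placeholder, not the real ShowMaker layout):

```python
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("placeholder UI")  # stands in for the real app interface

demo.queue()             # replaces enable_queue=True on Gradio 4+
demo.launch(share=True)  # share=True serves through a temporary public gradio.live URL
```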