Update app.py
app.py CHANGED
@@ -83,11 +83,11 @@ os.putenv("HF_HUB_ENABLE_HF_TRANSFER","1")
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-text_encoder = CLIPTextModel.from_pretrained('ford442/RealVisXL_V5.0_BF16',
-text_encoder_2 = CLIPTextModelWithProjection.from_pretrained('ford442/RealVisXL_V5.0_BF16',
-tokenizer_1 = CLIPTokenizer.from_pretrained('ford442/RealVisXL_V5.0_BF16',
-tokenizer_2 = CLIPTokenizer.from_pretrained('ford442/RealVisXL_V5.0_BF16',
-scheduler = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16',
+text_encoder = CLIPTextModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder', token=True)#.to(device=device, dtype=torch.bfloat16)
+text_encoder_2 = CLIPTextModelWithProjection.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder_2',token=True)#.to(device=device, dtype=torch.bfloat16)
+tokenizer_1 = CLIPTokenizer.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='tokenizer', token=True)
+tokenizer_2 = CLIPTokenizer.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='tokenizer_2', token=True)
+scheduler = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler', token=True)
 vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", low_cpu_mem_usage=False, safety_checker=None, use_safetensors=False, torch_dtype=torch.float32, token=True) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
 unet = UNet2DConditionModel.from_pretrained("ford442/RealVisXL_V5.0_BF16", low_cpu_mem_usage=False, subfolder='unet', upcast_attention=True, attention_type='gated-text-image', token=True)
 
@@ -99,15 +99,15 @@ def load_and_prepare_model():
         add_watermarker=False,
         text_encoder=None,
         text_encoder_2=None,
-        tokenizer=None,
-        tokenizer_2=None,
-        scheduler=None,
-        unet=
+        #tokenizer=None,
+        #tokenizer_2=None,
+        #scheduler=None,
+        unet=unet,
         vae=None,
     )
-    pipe.scheduler=scheduler
-    pipe.tokenizer=tokenizer_1
-    pipe.tokenizer_2=tokenizer_2
+    #pipe.scheduler=scheduler
+    #pipe.tokenizer=tokenizer_1
+    #pipe.tokenizer_2=tokenizer_2
     pipe.unet=unet
     #pipe.vae.do_resize=False
     #pipe.vae.vae_scale_factor=8
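
For context, the second hunk shows only the keyword arguments of the pipeline constructor inside load_and_prepare_model(); the call itself sits outside the visible diff. Below is a minimal sketch of how the two hunks fit together after this commit, assuming the pipeline is created with diffusers' StableDiffusionXLPipeline.from_pretrained (an assumption: the actual constructor and surrounding code are not shown in these hunks).

# Sketch only: the from_pretrained constructor call below is an assumption,
# since it is outside the visible hunks of this commit.
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler,
                       AutoencoderKL, UNet2DConditionModel)

repo = 'ford442/RealVisXL_V5.0_BF16'

# Hunk 1: components are now loaded from their subfolders inside the repo,
# with token=True for authenticated Hub access.
text_encoder = CLIPTextModel.from_pretrained(repo, subfolder='text_encoder', token=True)
text_encoder_2 = CLIPTextModelWithProjection.from_pretrained(repo, subfolder='text_encoder_2', token=True)
tokenizer_1 = CLIPTokenizer.from_pretrained(repo, subfolder='tokenizer', token=True)
tokenizer_2 = CLIPTokenizer.from_pretrained(repo, subfolder='tokenizer_2', token=True)
scheduler = EulerAncestralDiscreteScheduler.from_pretrained(repo, subfolder='scheduler', token=True)
unet = UNet2DConditionModel.from_pretrained(repo, subfolder='unet', low_cpu_mem_usage=False, token=True)

def load_and_prepare_model():
    # Hunk 2: the preloaded UNet is passed straight into the constructor, while the
    # tokenizer/scheduler overrides are commented out so the repo defaults are used
    # instead of being set to None and reattached afterwards.
    pipe = StableDiffusionXLPipeline.from_pretrained(
        repo,
        add_watermarker=False,
        text_encoder=None,
        text_encoder_2=None,
        unet=unet,
        vae=None,
    )
    pipe.unet = unet  # kept from the original code; redundant once unet is passed above
    return pipe

Passing the preloaded unet into the constructor keeps from_pretrained from loading the repo's UNet weights a second time, and dropping the None overrides for tokenizer, tokenizer_2, and scheduler lets the repo's own components load instead of being reattached after construction.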