Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -83,20 +83,15 @@ os.putenv("HF_HUB_ENABLE_HF_TRANSFER","1")
|
|
83 |
|
84 |
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
85 |
|
86 |
-
text_encoder=CLIPTextModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='text_encoder',token=True)#.to(device=device, dtype=torch.bfloat16)
|
87 |
-
text_encoder_2=CLIPTextModelWithProjection.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='text_encoder_2',token=True)#.to(device=device, dtype=torch.bfloat16)
|
88 |
-
tokenizer_1=CLIPTokenizer.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='tokenizer',token=True)
|
89 |
-
tokenizer_2=CLIPTokenizer.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='tokenizer_2',token=True)
|
90 |
-
scheduler=EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='scheduler',token=True)
|
91 |
vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", low_cpu_mem_usage=False, safety_checker=None, use_safetensors=False, torch_dtype=torch.float32, token=True) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
|
92 |
-
UNet2DConditionModel
|
|
|
93 |
def load_and_prepare_model():
|
94 |
-
#vaeRV = AutoencoderKL.from_pretrained("SG161222/RealVisXL_V5.0", subfolder='vae', safety_checker=None, use_safetensors=True, token=True)
|
95 |
-
#vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False, low_cpu_mem_usage=False, torch_dtype=torch.float32, token=True) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
|
96 |
-
#sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1,use_karras_sigmas=True)
|
97 |
-
#sched = DPMSolverSDEScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler')
|
98 |
-
#sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear", token=True) #, beta_start=0.00085, beta_end=0.012, steps_offset=1,use_karras_sigmas=True, token=True)
|
99 |
-
#sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear")
|
100 |
pipe = StableDiffusionXLPipeline.from_pretrained(
|
101 |
'ford442/RealVisXL_V5.0_BF16',
|
102 |
#torch_dtype=torch.bfloat16,
|
@@ -185,7 +180,7 @@ def generate_30(
|
|
185 |
guidance_scale: float = 4,
|
186 |
num_inference_steps: int = 125,
|
187 |
use_resolution_binning: bool = True,
|
188 |
-
progress=gr.Progress(track_tqdm=True)
|
189 |
):
|
190 |
seed = random.randint(0, MAX_SEED)
|
191 |
generator = torch.Generator(device='cuda').manual_seed(seed)
|
@@ -227,7 +222,7 @@ def generate_60(
|
|
227 |
guidance_scale: float = 4,
|
228 |
num_inference_steps: int = 125,
|
229 |
use_resolution_binning: bool = True,
|
230 |
-
progress=gr.Progress(track_tqdm=True)
|
231 |
):
|
232 |
seed = random.randint(0, MAX_SEED)
|
233 |
generator = torch.Generator(device='cuda').manual_seed(seed)
|
@@ -269,7 +264,7 @@ def generate_90(
|
|
269 |
guidance_scale: float = 4,
|
270 |
num_inference_steps: int = 125,
|
271 |
use_resolution_binning: bool = True,
|
272 |
-
progress=gr.Progress(track_tqdm=True)
|
273 |
):
|
274 |
seed = random.randint(0, MAX_SEED)
|
275 |
generator = torch.Generator(device='cuda').manual_seed(seed)
|
@@ -341,6 +336,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
|
|
341 |
run_button_30 = gr.Button("Run 30 Seconds", scale=0)
|
342 |
run_button_60 = gr.Button("Run 60 Seconds", scale=0)
|
343 |
run_button_90 = gr.Button("Run 90 Seconds", scale=0)
|
|
|
344 |
result = gr.Gallery(label="Result", columns=1, show_label=False)
|
345 |
|
346 |
with gr.Row():
|
|
|
83 |
|
84 |
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
85 |
|
86 |
+
# Pipeline components are loaded eagerly at module import time. All diffusion
# components (text encoders, tokenizers, scheduler, UNet) come from the same
# 'ford442/RealVisXL_V5.0_BF16' repo so their configs are mutually compatible.
# low_cpu_mem_usage=False forces a full in-memory load; token=True uses the
# locally configured HF auth token for the (gated/private) repo.
text_encoder = CLIPTextModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='text_encoder', token=True)#.to(device=device, dtype=torch.bfloat16)
text_encoder_2 = CLIPTextModelWithProjection.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='text_encoder_2', token=True)#.to(device=device, dtype=torch.bfloat16)
tokenizer_1 = CLIPTokenizer.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='tokenizer', token=True)
tokenizer_2 = CLIPTokenizer.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='tokenizer_2', token=True)
scheduler = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='scheduler', token=True)
# The VAE is deliberately kept in float32 (not bfloat16) — the stock SDXL VAE
# is known to produce NaNs in half precision, hence the dedicated fp32 repo.
vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", low_cpu_mem_usage=False, safety_checker=None, use_safetensors=False, torch_dtype=torch.float32, token=True) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
# BUG FIX: the UNet must be loaded from the pipeline repo, not the VAE repo.
# "stabilityai/sdxl-vae" contains only VAE weights and has no 'unet'
# subfolder, so the previous call would fail at load time.
unet = UNet2DConditionModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='unet', token=True)
|
93 |
+
|
94 |
def load_and_prepare_model():
|
|
|
|
|
|
|
|
|
|
|
|
|
95 |
pipe = StableDiffusionXLPipeline.from_pretrained(
|
96 |
'ford442/RealVisXL_V5.0_BF16',
|
97 |
#torch_dtype=torch.bfloat16,
|
|
|
180 |
guidance_scale: float = 4,
|
181 |
num_inference_steps: int = 125,
|
182 |
use_resolution_binning: bool = True,
|
183 |
+
progress=gr.Progress(track_tqdm=True)
|
184 |
):
|
185 |
seed = random.randint(0, MAX_SEED)
|
186 |
generator = torch.Generator(device='cuda').manual_seed(seed)
|
|
|
222 |
guidance_scale: float = 4,
|
223 |
num_inference_steps: int = 125,
|
224 |
use_resolution_binning: bool = True,
|
225 |
+
progress=gr.Progress(track_tqdm=True)
|
226 |
):
|
227 |
seed = random.randint(0, MAX_SEED)
|
228 |
generator = torch.Generator(device='cuda').manual_seed(seed)
|
|
|
264 |
guidance_scale: float = 4,
|
265 |
num_inference_steps: int = 125,
|
266 |
use_resolution_binning: bool = True,
|
267 |
+
progress=gr.Progress(track_tqdm=True)
|
268 |
):
|
269 |
seed = random.randint(0, MAX_SEED)
|
270 |
generator = torch.Generator(device='cuda').manual_seed(seed)
|
|
|
336 |
run_button_30 = gr.Button("Run 30 Seconds", scale=0)
|
337 |
run_button_60 = gr.Button("Run 60 Seconds", scale=0)
|
338 |
run_button_90 = gr.Button("Run 90 Seconds", scale=0)
|
339 |
+
|
340 |
result = gr.Gallery(label="Result", columns=1, show_label=False)
|
341 |
|
342 |
with gr.Row():
|