Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -9,7 +9,7 @@ from diffusers import (
     StableDiffusion3Pipeline,
     FluxPipeline
 )
-from transformers import BlipProcessor, BlipForConditionalGeneration
+from transformers import BlipProcessor, BlipForConditionalGeneration, pipeline
 from pathlib import Path
 from safetensors.torch import load_file
 from huggingface_hub import hf_hub_download
@@ -72,7 +72,7 @@ def load_model(model_name):
     return pipeline
 
 # Initialize the default model
-default_model = "
+default_model = "black-forest-labs/FLUX.1-dev"
 pipeline_text2image = load_model(default_model)
 
 @spaces.GPU
@@ -225,12 +225,12 @@ This demo provides an insightful look into how current text-to-image models hand
 model_dropdown = gr.Dropdown(
     label="Choose a model",
     choices=[
+        "black-forest-labs/FLUX.1-dev"
         "stabilityai/stable-diffusion-3-medium-diffusers",
         "stabilityai/sdxl-turbo",
         "ByteDance/SDXL-Lightning",
         "stabilityai/stable-diffusion-2",
         "segmind/SSD-1B",
-        "black-forest-labs/FLUX.1-dev"
     ],
     value=default_model
)
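For context, a minimal sketch of how the updated pieces could fit together. The diff does not show the body of load_model, so the standalone FluxPipeline load below, the bfloat16 and "cuda" settings, the gr.Blocks wrapper, and the trailing comma after the new dropdown entry are assumptions for illustration, not code from this commit.

# Sketch only: approximates the new default model and reordered dropdown from the diff.
import torch
import gradio as gr
from diffusers import FluxPipeline

default_model = "black-forest-labs/FLUX.1-dev"

# FLUX.1-dev is a gated checkpoint; this assumes the Space has accepted the
# model license and has a Hugging Face token available in its environment.
pipe = FluxPipeline.from_pretrained(default_model, torch_dtype=torch.bfloat16)
pipe.to("cuda")

with gr.Blocks() as demo:
    model_dropdown = gr.Dropdown(
        label="Choose a model",
        choices=[
            "black-forest-labs/FLUX.1-dev",  # trailing comma assumed so this stays a separate choice
            "stabilityai/stable-diffusion-3-medium-diffusers",
            "stabilityai/sdxl-turbo",
            "ByteDance/SDXL-Lightning",
            "stabilityai/stable-diffusion-2",
            "segmind/SSD-1B",
        ],
        value=default_model,
    )

If the added entry really has no trailing comma in app.py, Python's implicit string concatenation would merge it with the next item and the dropdown's default value would not match any choice, which is why the comma is included in this sketch.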