prithivMLmods committed on
Commit be810f5
1 Parent(s): c45a279

Update app.py

Files changed (1)
  1. app.py +43 -34
app.py CHANGED
@@ -8,6 +8,7 @@ from PIL import Image
 import spaces
 import torch
 from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
+from diffusers import AuraFlowPipeline
 
 css = '''
 .gradio-container{max-width: 570px !important}
@@ -28,7 +29,7 @@ examples = [
 MODEL_OPTIONS = {
     "Lightning": "SG161222/RealVisXL_V4.0_Lightning",
     "Turbovision": "SG161222/RealVisXL_V3.0_Turbo",
-
+    "AuraFlow": "fal/AuraFlow"
 }
 
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
@@ -39,13 +40,19 @@ BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
 def load_and_prepare_model(model_id):
-    pipe = StableDiffusionXLPipeline.from_pretrained(
-        model_id,
-        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
-        use_safetensors=True,
-        add_watermarker=False,
-    ).to(device)
-    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+    if model_id == "fal/AuraFlow":
+        pipe = AuraFlowPipeline.from_pretrained(
+            model_id,
+            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+        ).to(device)
+    else:
+        pipe = StableDiffusionXLPipeline.from_pretrained(
+            model_id,
+            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+            use_safetensors=True,
+            add_watermarker=False,
+        ).to(device)
+        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
 
     if USE_TORCH_COMPILE:
         pipe.compile()
@@ -117,31 +124,27 @@ def generate(
     image_paths = [save_image(img) for img in images]
     return image_paths, seed
 
-#def load_predefined_images():
-#    predefined_images = [
-#        "assets/1.png",
-#        "assets/2.png",
-#        "assets/3.png",
-#        "assets/4.png",
-#        "assets/5.png",
-#        "assets/6.png",
-#        "assets/7.png",
-#        "assets/8.png",
-#        "assets/9.png",
-#        "assets/10.png",
-#        "assets/11.png",
-#        "assets/12.png",
-#    ]
-#    return predefined_images
+def load_predefined_images():
+    predefined_images = [
+        "assets/1.png",
+        "assets/2.png",
+        "assets/3.png",
+        "assets/4.png",
+        "assets/5.png",
+        "assets/6.png",
+        "assets/7.png",
+        "assets/8.png",
+        "assets/9.png",
+        "assets/10.png",
+        "assets/11.png",
+        "assets/12.png",
+    ]
+    return predefined_images
+
+
 
 
 with gr.Blocks(css=css) as demo:
-    gr.Markdown(
-        f"""
-        # Text🥠Image
-        Models used in the playground [[Lightning]](https://huggingface.co/SG161222/RealVisXL_V4.0_Lightning), [[Realvision]](https://huggingface.co/) ,[[Turbo]](https://huggingface.co/SG161222/RealVisXL_V3.0_Turbo) for image generation. stable diffusion xl piped (sdxl) model HF. This is the demo space for generating images using the Stable Diffusion XL models, with multi different variants available. ⚠️ users are accountable for the content they generate and are responsible for ensuring it meets appropriate ethical standards.
-        """
-    )
     with gr.Row():
         prompt = gr.Text(
             label="Prompt",
@@ -157,7 +160,7 @@ with gr.Blocks(css=css) as demo:
         model_choice = gr.Dropdown(
            label="Model Selection",
            choices=list(MODEL_OPTIONS.keys()),
-           value="Lightning"
+           value="AuraFlow"
        )
 
    with gr.Accordion("Advanced options", open=True, visible=False):
@@ -254,8 +257,14 @@ with gr.Blocks(css=css) as demo:
         outputs=[result, seed],
         api_name="run",
     )
-    # with gr.Column(scale=3):
-    #     gr.Markdown("### Image Gallery")
-    #     predefined_gallery = gr.Gallery(label="Image Gallery", columns=4, show_label=False, value=load_predefined_images())
+    gr.Markdown("🥠Models used in the playground [[Lightning]](https://huggingface.co/SG161222/RealVisXL_V4.0_Lightning), [[AuraFlow]](https://huggingface.co/fal/AuraFlow) ,[[Turbo]](https://huggingface.co/SG161222/RealVisXL_V3.0_Turbo) for image generation. stable diffusion xl piped (sdxl) model HF. This is the demo space for generating images using the Stable Diffusion XL models, with multi different variants available. ⚠️ users are accountable for the content they generate and are responsible for ensuring it meets appropriate ethical standards.")
+
+    gr.Markdown("🥠This is the demo space for generating images using Stable Diffusion with grids, filters, templates, quality styles, and types. Try the sample prompts to generate higher quality images. Try the sample prompts for generating higher quality images.<a href='https://huggingface.co/spaces/prithivMLmods/Top-Prompt-Collection' target='_blank'>Try prompts</a>.")
+    gr.Markdown("⚠️ users are accountable for the content they generate and are responsible for ensuring it meets appropriate ethical standards.")
+
+    with gr.Column(scale=3):
+        gr.Markdown("### Image Gallery")
+        predefined_gallery = gr.Gallery(label="Image Gallery", columns=4, show_label=False, value=load_predefined_images())
+
 if __name__ == "__main__":
     demo.queue(max_size=20).launch(show_api=False)
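
For context, a minimal standalone sketch of how the model routing introduced by this commit could be exercised outside the Gradio UI. MODEL_OPTIONS and the branch on "fal/AuraFlow" mirror the diff above; the prompt, step count, guidance scale, and output filename are illustrative assumptions, not values taken from app.py.

# Sketch only: assumes a diffusers version that ships AuraFlowPipeline; CUDA is optional.
import torch
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler, AuraFlowPipeline

MODEL_OPTIONS = {
    "Lightning": "SG161222/RealVisXL_V4.0_Lightning",
    "Turbovision": "SG161222/RealVisXL_V3.0_Turbo",
    "AuraFlow": "fal/AuraFlow",
}

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
dtype = torch.float16 if torch.cuda.is_available() else torch.float32

def load_and_prepare_model(model_id):
    # AuraFlow is loaded with its default scheduler; the RealVisXL checkpoints
    # go through StableDiffusionXLPipeline + Euler Ancestral, as in the diff.
    if model_id == "fal/AuraFlow":
        pipe = AuraFlowPipeline.from_pretrained(model_id, torch_dtype=dtype).to(device)
    else:
        pipe = StableDiffusionXLPipeline.from_pretrained(
            model_id,
            torch_dtype=dtype,
            use_safetensors=True,
            add_watermarker=False,
        ).to(device)
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
    return pipe

if __name__ == "__main__":
    pipe = load_and_prepare_model(MODEL_OPTIONS["AuraFlow"])
    # Illustrative generation settings; the Space takes these from its UI controls.
    image = pipe(
        prompt="a photo of a red fox in the snow",
        num_inference_steps=28,
        guidance_scale=3.5,
    ).images[0]
    image.save("output.png")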