prithivMLmods committed
Commit 47473ae
1 Parent(s): 499319e

Update app.py

Files changed (1)
  1. app.py +101 -41
app.py CHANGED
@@ -1,4 +1,10 @@
 #!/usr/bin/env python
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+
 import os
 import random
 import uuid
@@ -8,6 +14,7 @@ from PIL import Image
 import spaces
 import torch
 from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
+from typing import Tuple
 
 css = '''
 .gradio-container{max-width: 570px !important}
@@ -18,18 +25,22 @@ footer {
 '''
 
 DESCRIPTIONXX = """
-## TEXT 2 IMAGE🥠
+## MidJourney XL🏜️
 """
+
 examples = [
 
-    "Illustration of A starry night camp in the mountains, 4k, cinematic --ar 85:128 --v 6.0 --style raw",
-    "A delicious ceviche cheesecake slice, 4k, octane render, ray tracing, Ultra-High-Definition"
+    "Many apples splashed with drops of water within a fancy bowl 4k, hdr --v 6.0 --style raw",
+    "A profile photo of a dog, brown background, shot on Leica M6 --ar 128:85 --v 6.0 --style raw",
 ]
 
 MODEL_OPTIONS = {
-    "Lightning": "SG161222/RealVisXL_V4.0_Lightning",
-    "Turbovision": "SG161222/RealVisXL_V3.0_Turbo",
-
+
+    "REALVISXL V5.0": "SG161222/RealVisXL_V5.0",
+    "LIGHTNING V5.0": "SG161222/RealVisXL_V5.0_Lightning",
+    "REALVISXL V4.0": "SG161222/RealVisXL_V4.0",
+    "LIGHTNING V4.0": "SG161222/RealVisXL_V4.0_Lightning",
+    "REALVISXL V3.0 TURBO": "SG161222/RealVisXL_V3.0_Turbo",
 }
 
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
@@ -39,6 +50,43 @@ BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
+style_list = [
+    {
+        "name": "3840 x 2160",
+        "prompt": "hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
+        "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
+    },
+    {
+        "name": "2560 x 1440",
+        "prompt": "hyper-realistic 4K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
+        "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
+    },
+    {
+        "name": "HD+",
+        "prompt": "hyper-realistic 2K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
+        "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
+    },
+    {
+        "name": "Style Zero",
+        "prompt": "{prompt}",
+        "negative_prompt": "",
+    },
+]
+
+styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
+DEFAULT_STYLE_NAME = "3840 x 2160"
+STYLE_NAMES = list(styles.keys())
+
+def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
+    if style_name in styles:
+        p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
+    else:
+        p, n = styles[DEFAULT_STYLE_NAME]
+
+    if not negative:
+        negative = ""
+    return p.replace("{prompt}", positive), n + negative
+
 def load_and_prepare_model(model_id):
     pipe = StableDiffusionXLPipeline.from_pretrained(
         model_id,
@@ -77,6 +125,7 @@ def generate(
     prompt: str,
     negative_prompt: str = "",
     use_negative_prompt: bool = False,
+    style_selection: str = DEFAULT_STYLE_NAME,
     seed: int = 1,
     width: int = 1024,
     height: int = 1024,
@@ -93,6 +142,8 @@ def generate(
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator(device=device).manual_seed(seed)
 
+    prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
+
     options = {
         "prompt": [prompt] * num_images,
         "negative_prompt": [negative_prompt] * num_images if use_negative_prompt else None,
@@ -118,22 +169,6 @@
     image_paths = [save_image(img) for img in images]
     return image_paths, seed
 
-def load_predefined_images():
-    predefined_images = [
-        "assets/1.png",
-        "assets/2.png",
-        "assets/3.png",
-        "assets/4.png",
-        "assets/5.png",
-        "assets/6.png",
-        "assets/7.png",
-        "assets/8.png",
-        "assets/9.png",
-        "assets/10.png",
-        "assets/11.png",
-        "assets/12.png",
-    ]
-    return predefined_images
 
 with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
     gr.Markdown(DESCRIPTIONXX)
@@ -145,21 +180,29 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
             placeholder="Enter your prompt",
            container=False,
        )
-        run_button = gr.Button("Run", scale=0)
+        run_button = gr.Button("Run", scale=0)
    result = gr.Gallery(label="Result", columns=1, show_label=False)
 
    with gr.Row():
        model_choice = gr.Dropdown(
-            label="Model Selection",
+            label="Model Selection🔻",
            choices=list(MODEL_OPTIONS.keys()),
-            value="Lightning"
+            value="LIGHTNING V5.0"
        )
 
-    with gr.Accordion("Advanced options", open=True, visible=False):
+    with gr.Accordion("Advanced options", open=False, visible=False):
+        style_selection = gr.Radio(
+            show_label=True,
+            container=True,
+            interactive=True,
+            choices=STYLE_NAMES,
+            value=DEFAULT_STYLE_NAME,
+            label="Quality Style",
+        )
        num_images = gr.Slider(
            label="Number of Images",
            minimum=1,
-            maximum=1,
+            maximum=5,
            step=1,
            value=1,
        )
@@ -208,9 +251,9 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
        num_inference_steps = gr.Slider(
            label="Number of inference steps",
            minimum=1,
-            maximum=35,
+            maximum=60,
            step=1,
-            value=20,
+            value=28,
        )
 
    gr.Examples(
@@ -238,24 +281,41 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
            prompt,
            negative_prompt,
            use_negative_prompt,
+            style_selection,
            seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
-            randomize_seed,
-            num_images
+            randomize_seed,
+            num_images,
        ],
        outputs=[result, seed],
-        api_name="run",
    )
-    gr.Markdown("🥠Models used in the playground [[Lightning]](https://huggingface.co/SG161222/RealVisXL_V4.0_Lightning), [[Turbo]](https://huggingface.co/SG161222/RealVisXL_V3.0_Turbo) for image generation. stable diffusion xl piped (sdxl) model HF. This is the demo space for generating images using the Stable Diffusion XL models, with multi different variants available.")
-    gr.Markdown("🥠This is the demo space for generating images using Stable Diffusion with quality styles, different models and types. Try the sample prompts to generate higher quality images. Try the sample prompts for generating higher quality images.<a href='https://huggingface.co/spaces/prithivMLmods/Top-Prompt-Collection' target='_blank'>Try prompts</a>.")
-    gr.Markdown("⚠️ users are accountable for the content they generate and are responsible for ensuring it meets appropriate ethical standards.")
-
-    with gr.Column(scale=3):
-        gr.Markdown("### Image Gallery")
-        predefined_gallery = gr.Gallery(label="Image Gallery", columns=3, show_label=False, value=load_predefined_images())
-
+
+    gr.Markdown(
+        """
+        <div style="text-align: justify;">
+        ⚡Models used in the playground: <a href="https://huggingface.co/SG161222/RealVisXL_V5.0">[REALVISXL V5.0]</a>, <a href="https://huggingface.co/SG161222/RealVisXL_V5.0_Lightning">[LIGHTNING V5.0]</a>, <a href="https://huggingface.co/SG161222/RealVisXL_V4.0">[REALVISXL V4.0]</a>, <a href="https://huggingface.co/SG161222/RealVisXL_V4.0_Lightning">[LIGHTNING V4.0]</a>, <a href="https://huggingface.co/SG161222/RealVisXL_V3.0_Turbo">[REALVISXL V3.0 TURBO]</a>
+        for image generation. Stable Diffusion XL piped (SDXL) model HF. This is the demo space for generating images using the Stable Diffusion XL models, with multiple different variants available.
+        </div>
+        """
+    )
+
+    gr.Markdown(
+        """
+        <div style="text-align: justify;">
+        ⚡This is the demo space for generating images using Stable Diffusion XL with quality styles, different models, and types. Try the sample prompts to generate higher quality images. Try the sample prompts for generating higher quality images.
+        <a href='https://huggingface.co/spaces/prithivMLmods/Top-Prompt-Collection' target='_blank'>Try prompts</a>.
+        </div>
+        """)
+
+    gr.Markdown(
+        """
+        <div style="text-align: justify;">
+        ⚠️ Users are accountable for the content they generate and are responsible for ensuring it meets appropriate ethical standards.
+        </div>
+        """)
+
 if __name__ == "__main__":
-    demo.queue(max_size=20).launch(show_api=False)
+    demo.queue(max_size=50).launch(show_api=False)
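
For reviewers, a condensed, self-contained sketch of what the newly added style plumbing does before the pipeline is called. The `styles` table is trimmed to two of its four entries and `apply_style` is collapsed to an equivalent dictionary lookup; the names mirror the diff above, and the snippet is illustrative rather than part of the commit.

# Illustrative sketch (not part of the commit): how apply_style composes the
# final prompt and negative prompt from the selected "Quality Style".
from typing import Tuple

# Trimmed copy of the style table added in this commit (two of its four entries).
styles = {
    "3840 x 2160": (
        "hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
        "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
    ),
    "Style Zero": ("{prompt}", ""),
}
DEFAULT_STYLE_NAME = "3840 x 2160"

def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
    # Unknown style names fall back to the default style, as in the diff.
    p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
    # The user's negative prompt is appended directly after the style's negatives.
    return p.replace("{prompt}", positive), n + negative

if __name__ == "__main__":
    prompt, negative = apply_style(DEFAULT_STYLE_NAME, "a profile photo of a dog", ", text, watermark")
    print(prompt)    # the style template with the user prompt substituted in
    print(negative)  # the style negatives followed by the user's extras

Because `n + negative` concatenates the two strings with no separator, callers supplying their own negatives may want to lead with a comma, as in the example. In the app itself the composed negative prompt is only forwarded to the pipeline when "Use negative prompt" is enabled, per the `options` dict shown in the diff.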