John6666 committed (verified)
Commit 798447d · Parent: 469e051

Update app.py

Files changed (1)
  1. app.py +368 -376
app.py CHANGED
@@ -1,376 +1,368 @@
 import os
-if os.environ.get("SPACES_ZERO_GPU") is not None:
-    import spaces
-else:
-    class spaces:
-        @staticmethod
-        def GPU(func):
-            def wrapper(*args, **kwargs):
-                return func(*args, **kwargs)
-            return wrapper
+import spaces
 import gradio as gr
 from gradio_imageslider import ImageSlider
 import torch
 torch.jit.script = lambda f: f
 from hidiffusion import apply_hidiffusion
 from diffusers import (
     ControlNetModel,
     StableDiffusionXLControlNetImg2ImgPipeline,
     DDIMScheduler,
 )
 from controlnet_aux import AnylineDetector
 from compel import Compel, ReturnedEmbeddingsType
 from PIL import Image
 import time
 import numpy as np

 IS_SPACES_ZERO = os.environ.get("SPACES_ZERO_GPU", "0") == "1"
 IS_SPACE = os.environ.get("SPACE_ID", None) is not None

 device = "cuda" if torch.cuda.is_available() else "cpu"
 dtype = torch.float16

 LOW_MEMORY = os.getenv("LOW_MEMORY", "0") == "1"

 print(f"device: {device}")
 print(f"dtype: {dtype}")
 print(f"low memory: {LOW_MEMORY}")


 model = "stabilityai/stable-diffusion-xl-base-1.0"
 # model = "stabilityai/sdxl-turbo"
 # vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=dtype)
 scheduler = DDIMScheduler.from_pretrained(model, subfolder="scheduler")
 # controlnet = ControlNetModel.from_pretrained(
 #     "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
 # )
 controlnet = ControlNetModel.from_pretrained(
     "TheMistoAI/MistoLine",
     torch_dtype=torch.float16,
     revision="refs/pr/3",
     variant="fp16",
 )
 pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained(
     model,
     controlnet=controlnet,
     torch_dtype=dtype,
     variant="fp16",
     use_safetensors=True,
     scheduler=scheduler,
 )

 compel = Compel(
     tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
     text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
     returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
     requires_pooled=[False, True],
 )
 pipe = pipe.to(device)

 if not IS_SPACES_ZERO:
     apply_hidiffusion(pipe)
     # pipe.enable_xformers_memory_efficient_attention()
     pipe.enable_model_cpu_offload()
     pipe.enable_vae_tiling()

 anyline = AnylineDetector.from_pretrained(
     "TheMistoAI/MistoLine", filename="MTEED.pth", subfolder="Anyline"
 ).to(device)


 def pad_image(image):
     w, h = image.size
     if w == h:
         return image
     elif w > h:
         new_image = Image.new(image.mode, (w, w), (0, 0, 0))
         pad_w = 0
         pad_h = (w - h) // 2
         new_image.paste(image, (0, pad_h))
         return new_image
     else:
         new_image = Image.new(image.mode, (h, h), (0, 0, 0))
         pad_w = (h - w) // 2
         pad_h = 0
         new_image.paste(image, (pad_w, 0))
         return new_image


 @spaces.GPU(duration=120)
 def predict(
     input_image,
     prompt,
     negative_prompt,
     seed,
     guidance_scale=8.5,
     scale=2,
     controlnet_conditioning_scale=0.5,
     strength=1.0,
     controlnet_start=0.0,
     controlnet_end=1.0,
     guassian_sigma=2.0,
     intensity_threshold=3,
     progress=gr.Progress(track_tqdm=True),
 ):
     if IS_SPACES_ZERO:
         apply_hidiffusion(pipe)
     if input_image is None:
         raise gr.Error("Please upload an image.")
     padded_image = pad_image(input_image).resize((1024, 1024)).convert("RGB")
     conditioning, pooled = compel([prompt, negative_prompt])
     generator = torch.manual_seed(seed)
     last_time = time.time()
     anyline_image = anyline(
         padded_image,
         detect_resolution=1280,
         guassian_sigma=max(0.01, guassian_sigma),
         intensity_threshold=intensity_threshold,
     )

     images = pipe(
         image=padded_image,
         control_image=anyline_image,
         strength=strength,
         prompt_embeds=conditioning[0:1],
         pooled_prompt_embeds=pooled[0:1],
         negative_prompt_embeds=conditioning[1:2],
         negative_pooled_prompt_embeds=pooled[1:2],
         width=1024 * scale,
         height=1024 * scale,
         controlnet_conditioning_scale=float(controlnet_conditioning_scale),
         controlnet_start=float(controlnet_start),
         controlnet_end=float(controlnet_end),
         generator=generator,
         num_inference_steps=30,
         guidance_scale=guidance_scale,
         eta=1.0,
     )
     print(f"Time taken: {time.time() - last_time}")
     return (padded_image, images.images[0]), padded_image, anyline_image


 css = """
 #intro{
     # max-width: 32rem;
     # text-align: center;
     # margin: 0 auto;
 }
 """

 with gr.Blocks(css=css) as demo:
     gr.Markdown(
         """
 # Enhance This
 ### HiDiffusion SDXL

 [HiDiffusion](https://github.com/megvii-research/HiDiffusion) enables higher-resolution image generation.
 You can upload an initial image and prompt to generate an enhanced version.
 SDXL Controlnet [TheMistoAI/MistoLine](https://huggingface.co/TheMistoAI/MistoLine)
 [Duplicate Space](https://huggingface.co/spaces/radames/Enhance-This-HiDiffusion-SDXL?duplicate=true) to avoid the queue.

 <small>
 <b>Notes</b> The author advises against the term "super resolution" because it's more like image-to-image generation than enhancement, but it's still a lot of fun!

 </small>
 """,
         elem_id="intro",
     )
     with gr.Row():
         with gr.Column(scale=1):
             image_input = gr.Image(type="pil", label="Input Image")
             prompt = gr.Textbox(
                 label="Prompt",
                 info="The prompt is very important to get the desired results. Please try to describe the image as best as you can. Accepts Compel Syntax",
             )
             negative_prompt = gr.Textbox(
                 label="Negative Prompt",
                 value="blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
             )
             seed = gr.Slider(
                 minimum=0,
                 maximum=2**64 - 1,
                 value=1415926535897932,
                 step=1,
                 label="Seed",
                 randomize=True,
             )
             with gr.Accordion(label="Advanced", open=False):
                 guidance_scale = gr.Slider(
                     minimum=0,
                     maximum=50,
                     value=8.5,
                     step=0.001,
                     label="Guidance Scale",
                 )
                 scale = gr.Slider(
                     minimum=1,
                     maximum=5,
                     value=2,
                     step=1,
                     label="Magnification Scale",
                     interactive=not IS_SPACE,
                 )
                 controlnet_conditioning_scale = gr.Slider(
                     minimum=0,
                     maximum=1,
                     step=0.001,
                     value=0.5,
                     label="ControlNet Conditioning Scale",
                 )
                 strength = gr.Slider(
                     minimum=0,
                     maximum=1,
                     step=0.001,
                     value=1,
                     label="Strength",
                 )
                 controlnet_start = gr.Slider(
                     minimum=0,
                     maximum=1,
                     step=0.001,
                     value=0.0,
                     label="ControlNet Start",
                 )
                 controlnet_end = gr.Slider(
                     minimum=0.0,
                     maximum=1.0,
                     step=0.001,
                     value=1.0,
                     label="ControlNet End",
                 )
                 guassian_sigma = gr.Slider(
                     minimum=0.01,
                     maximum=10.0,
                     step=0.1,
                     value=2.0,
                     label="(Anyline) Guassian Sigma",
                 )
                 intensity_threshold = gr.Slider(
                     minimum=0,
                     maximum=255,
                     step=1,
                     value=3,
                     label="(Anyline) Intensity Threshold",
                 )

             btn = gr.Button()
         with gr.Column(scale=2):
             with gr.Group():
                 image_slider = ImageSlider(position=0.5)
             with gr.Row():
                 padded_image = gr.Image(type="pil", label="Padded Image")
                 anyline_image = gr.Image(type="pil", label="Anyline Image")
     inputs = [
         image_input,
         prompt,
         negative_prompt,
         seed,
         guidance_scale,
         scale,
         controlnet_conditioning_scale,
         strength,
         controlnet_start,
         controlnet_end,
         guassian_sigma,
         intensity_threshold,
     ]
     outputs = [image_slider, padded_image, anyline_image]
     btn.click(lambda x: None, inputs=None, outputs=image_slider).then(
         fn=predict, inputs=inputs, outputs=outputs
     )
     gr.Examples(
         fn=predict,
         inputs=inputs,
         outputs=outputs,
         examples=[
             [
                 "./examples/lara.jpeg",
                 "photography of lara croft 8k high definition award winning",
                 "blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
                 5436236241,
                 8.5,
                 2,
                 0.8,
                 1.0,
                 0.0,
                 0.9,
                 2,
                 3,
             ],
             [
                 "./examples/cybetruck.jpeg",
                 "photo of tesla cybertruck futuristic car 8k high definition on a sand dune in mars, future",
                 "blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
                 383472451451,
                 8.5,
                 2,
                 0.8,
                 0.8,
                 0.0,
                 0.9,
                 2,
                 3,
             ],
             [
                 "./examples/jesus.png",
                 "a photorealistic painting of Jesus Christ, 4k high definition",
                 "blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
                 13317204146129588000,
                 8.5,
                 2,
                 0.8,
                 0.8,
                 0.0,
                 0.9,
                 2,
                 3,
             ],
             [
                 "./examples/anna-sullivan-DioLM8ViiO8-unsplash.jpg",
                 "A crowded stadium with enthusiastic fans watching a daytime sporting event, the stands filled with colorful attire and the sun casting a warm glow",
                 "blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
                 5623124123512,
                 8.5,
                 2,
                 0.8,
                 0.8,
                 0.0,
                 0.9,
                 2,
                 3,
             ],
             [
                 "./examples/img_aef651cb-2919-499d-aa49-6d4e2e21a56e_1024.jpg",
                 "a large red flower on a black background 4k high definition",
                 "blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
                 23123412341234,
                 8.5,
                 2,
                 0.8,
                 0.8,
                 0.0,
                 0.9,
                 2,
                 3,
             ],
             [
                 "./examples/huggingface.jpg",
                 "photo realistic huggingface human emoji costume, round, yellow, (human skin)+++ (human texture)+++",
                 "blurry, ugly, duplicate, poorly drawn, deformed, mosaic, emoji cartoon, drawing, pixelated",
                 12312353423,
                 15.206,
                 2,
                 0.364,
                 0.8,
                 0.0,
                 0.9,
                 2,
                 3,
             ],
         ],
         cache_examples="lazy",
     )


 demo.queue(api_open=True)
 demo.launch(show_api=True)