prithivMLmods committed
Commit b70f175 · verified · 1 Parent(s): 4c7da1a

Create app.txt

Files changed (1):
  1. last-commit/app.txt +262 -0
last-commit/app.txt ADDED
@@ -0,0 +1,262 @@
+ #!/usr/bin/env python
+ import os
+ import random
+ import uuid
+ import gradio as gr
+ import numpy as np
+ from PIL import Image
+ import spaces
+ import torch
+ from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
+
+ DESCRIPTIONx = """
+
+
+ """
+
+ css = '''
+ .gradio-container{max-width: 560px !important}
+ h1{text-align:center}
+ footer {
+     visibility: hidden
+ }
+ '''
+
+ #examples = [
+ #    "3d image, cute girl, in the style of Pixar --ar 1:2 --stylize 750, 4K resolution highlights, Sharp focus, octane render, ray tracing, Ultra-High-Definition, 8k, UHD, HDR, (Masterpiece:1.5), (best quality:1.5)",
+ #    "Chocolate dripping from a donut against a yellow background, in the style of brocore, hyper-realistic oil --ar 2:3 --q 2 --s 750 --v 5 --ar 2:3 --q 2 --s 750 --v 5",
+ #    "Illustration of A starry night camp in the mountains. Low-angle view, Minimal background, Geometric shapes theme, Pottery, Split-complementary colors, Bicolored light, UHD",
+ #    "Man in brown leather jacket posing for camera, in the style of sleek and stylized, clockpunk, subtle shades, exacting precision, ferrania p30 --ar 67:101 --v 5",
+ #    "Commercial photography, giant burger, white lighting, studio light, 8k octane rendering, high resolution photography, insanely detailed, fine details, on white isolated plain, 8k, commercial photography, stock photo, professional color grading, --v 4 --ar 9:16 "
+ #]
+
+ MODEL_OPTIONS = {
+     "Lightning": "SG161222/RealVisXL_V4.0_Lightning",
+     "Realvision": "SG161222/RealVisXL_V4.0",
+ }
+
+ MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
+ USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
+ ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
+ BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))
+
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+ def load_and_prepare_model(model_id):
+     pipe = StableDiffusionXLPipeline.from_pretrained(
+         model_id,
+         torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+         use_safetensors=True,
+         add_watermarker=False,
+     ).to(device)
+     pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+
+     if USE_TORCH_COMPILE:
+         # DiffusionPipeline has no .compile() method; compile the UNet directly
+         # with torch.compile (PyTorch 2.x) instead.
+         pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+
+     if ENABLE_CPU_OFFLOAD:
+         pipe.enable_model_cpu_offload()
+
+     return pipe
+
+ # Preload and prepare both models (compilation/offload are opt-in via env vars)
+ models = {key: load_and_prepare_model(value) for key, value in MODEL_OPTIONS.items()}
+
+ MAX_SEED = np.iinfo(np.int32).max
+
+ def save_image(img):
+     unique_name = str(uuid.uuid4()) + ".png"
+     img.save(unique_name)
+     return unique_name
+
+ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
+     if randomize_seed:
+         seed = random.randint(0, MAX_SEED)
+     return seed
+
+ @spaces.GPU(duration=60, enable_queue=True)
+ def generate(
+     model_choice: str,
+     prompt: str,
+     negative_prompt: str = "",
+     use_negative_prompt: bool = False,
+     seed: int = 1,
+     width: int = 1024,
+     height: int = 1024,
+     guidance_scale: float = 3,
+     num_inference_steps: int = 25,
+     randomize_seed: bool = False,
+     num_images: int = 1,
+     # The parameters above mirror the order of the Gradio `inputs` list below;
+     # the two below are not wired to UI components and keep their defaults.
+     use_resolution_binning: bool = True,
+     progress=gr.Progress(track_tqdm=True),
+ ):
+     global models
+     pipe = models[model_choice]
+
+     seed = int(randomize_seed_fn(seed, randomize_seed))
+     generator = torch.Generator(device=device).manual_seed(seed)
+
+     options = {
+         "prompt": [prompt] * num_images,
+         "negative_prompt": [negative_prompt] * num_images if use_negative_prompt else None,
+         "width": width,
+         "height": height,
+         "guidance_scale": guidance_scale,
+         "num_inference_steps": num_inference_steps,
+         "generator": generator,
+         "output_type": "pil",
+     }
+
+     if use_resolution_binning:
+         # Forwarded to the pipeline call as an extra keyword argument.
+         options["use_resolution_binning"] = True
+
+     # Generate in chunks of BATCH_SIZE to bound peak GPU memory usage.
+     images = []
+     for i in range(0, num_images, BATCH_SIZE):
+         batch_options = options.copy()
+         batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
+         # Only slice the negative prompts when they are actually set;
+         # the key is always present but may hold None.
+         if batch_options.get("negative_prompt") is not None:
+             batch_options["negative_prompt"] = options["negative_prompt"][i:i+BATCH_SIZE]
+         images.extend(pipe(**batch_options).images)
+
+     image_paths = [save_image(img) for img in images]
+     return image_paths, seed
+
+ def load_predefined_images():
+     predefined_images = [
+         "assets/1.png",
+         "assets/2.png",
+         "assets/3.png",
+         "assets/4.png",
+         "assets/5.png",
+         "assets/6.png",
+         "assets/7.png",
+         "assets/8.png",
+         "assets/9.png",
+         "assets/10.png",
+         "assets/11.png",
+         "assets/12.png",
+     ]
+     return predefined_images
+
+ with gr.Blocks(css=css) as demo:
+     gr.Markdown(DESCRIPTIONx)
+     with gr.Row():
+         prompt = gr.Text(
+             label="Prompt",
+             show_label=False,
+             max_lines=1,
+             placeholder="Enter your prompt",
+             value="A cartoon of a Ironman fighting with Hulk, wall painting",
+             container=False,
+         )
+         run_button = gr.Button("Run⚡", scale=0)
+     result = gr.Gallery(label="Result", columns=1, show_label=False)
+
+     with gr.Row():
+         model_choice = gr.Dropdown(
+             label="Model Selection",
+             choices=list(MODEL_OPTIONS.keys()),
+             value="Lightning"
+         )
+
+     with gr.Accordion("Advanced options", open=True, visible=False):
+         num_images = gr.Slider(
+             label="Number of Images",
+             minimum=1,
+             maximum=1,
+             step=1,
+             value=1,
+         )
+         with gr.Row():
+             with gr.Column(scale=1):
+                 use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
+                 negative_prompt = gr.Text(
+                     label="Negative prompt",
+                     max_lines=5,
+                     lines=4,
+                     placeholder="Enter a negative prompt",
+                     value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
+                     visible=True,
+                 )
+                 seed = gr.Slider(
+                     label="Seed",
+                     minimum=0,
+                     maximum=MAX_SEED,
+                     step=1,
+                     value=0,
+                 )
+                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+                 with gr.Row():
+                     width = gr.Slider(
+                         label="Width",
+                         minimum=512,
+                         maximum=MAX_IMAGE_SIZE,
+                         step=64,
+                         value=1024,
+                     )
+                     height = gr.Slider(
+                         label="Height",
+                         minimum=512,
+                         maximum=MAX_IMAGE_SIZE,
+                         step=64,
+                         value=1024,
+                     )
+                 with gr.Row():
+                     guidance_scale = gr.Slider(
+                         label="Guidance Scale",
+                         minimum=0.1,
+                         maximum=6,
+                         step=0.1,
+                         value=3.0,
+                     )
+                     num_inference_steps = gr.Slider(
+                         label="Number of inference steps",
+                         minimum=1,
+                         maximum=35,
+                         step=1,
+                         value=20,
+                     )
+
+     # gr.Examples(
+     #     examples=examples,
+     #     inputs=prompt,
+     #     cache_examples=False
+     # )
+
+     use_negative_prompt.change(
+         fn=lambda x: gr.update(visible=x),
+         inputs=use_negative_prompt,
+         outputs=negative_prompt,
+         api_name=False,
+     )
+
+     gr.on(
+         triggers=[
+             prompt.submit,
+             negative_prompt.submit,
+             run_button.click,
+         ],
+         fn=generate,
+         inputs=[
+             model_choice,
+             prompt,
+             negative_prompt,
+             use_negative_prompt,
+             seed,
+             width,
+             height,
+             guidance_scale,
+             num_inference_steps,
+             randomize_seed,
+             num_images
+         ],
+         outputs=[result, seed],
+         api_name="run",
+     )
+
+     # with gr.Column(scale=3):
+     #     gr.Markdown("### Image Gallery")
+     #     predefined_gallery = gr.Gallery(label="Image Gallery", columns=4, show_label=False, value=load_predefined_images())
+
+ if __name__ == "__main__":
+     demo.queue(max_size=40).launch(show_api=False)