prithivMLmods committed
Commit 0c1b8f7
1 Parent(s): db33805

Create app.py

Files changed (1): app.py +253 -0
app.py ADDED
@@ -0,0 +1,253 @@
+ #!/usr/bin/env python
+ import os
+ import random
+ import uuid
+ import json
+ import gradio as gr
+ import numpy as np
+ from PIL import Image
+ import spaces
+ import torch
+ from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
+
+ DESCRIPTIONx = """## STABLE HAMSTER 🐹
+ """
+
+ DESCRIPTIONy = """
+ <p align="left">
+ <a title="Github" href="https://github.com/PRITHIVSAKTHIUR/Stable-Hamster" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
+ <img src="https://img.shields.io/github/stars/PRITHIVSAKTHIUR/Stable-Hamster?label=GitHub%20%E2%98%85&logo=github&color=C8C" alt="badge-github-stars">
+ </a>
+ </p>
+ """
+
+ css = '''
+ .gradio-container{max-width: 560px !important}
+ h1{text-align:center}
+ footer {
+     visibility: hidden
+ }
+ '''
+
+ examples = [
+     "3d image, cute girl, in the style of Pixar --ar 1:2 --stylize 750, 4K resolution highlights, Sharp focus, octane render, ray tracing, Ultra-High-Definition, 8k, UHD, HDR, (Masterpiece:1.5), (best quality:1.5)",
+     "Cold coffee in a cup bokeh --ar 85:128 --v 6.0 --style raw5, 4K",
+     "Vector illustration of a horse, vector graphic design with flat colors on a brown background in the style of vector art, using simple shapes and graphics with simple details, professionally designed as a tshirt logo ready for print on a white background. --ar 89:82 --v 6.0 --style raw",
+     "Man in brown leather jacket posing for camera, in the style of sleek and stylized, clockpunk, subtle shades, exacting precision, ferrania p30 --ar 67:101 --v 5",
+     "Commercial photography, giant burger, white lighting, studio light, 8k octane rendering, high resolution photography, insanely detailed, fine details, on white isolated plain, 8k, commercial photography, stock photo, professional color grading, --v 4 --ar 9:16"
+ ]
+
+ MODEL_OPTIONS = {
+     "RealVisXL_V4.0_Lightning": "SG161222/RealVisXL_V4.0_Lightning",
+     "Animagine-XL-3.1": "cagliostrolab/animagine-xl-3.1"
+ }
+
+ MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
+ USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
+ ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
+ BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))
+
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+ def load_model(model_id):
+     # Load an SDXL pipeline (fp16 on GPU, fp32 on CPU) with the Euler Ancestral scheduler.
+     pipe = StableDiffusionXLPipeline.from_pretrained(
+         model_id,
+         torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+         use_safetensors=True,
+         add_watermarker=False,
+     ).to(device)
+     pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+
+     if USE_TORCH_COMPILE:
+         # Diffusers pipelines have no .compile() method; compile the UNet instead.
+         pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+
+     if ENABLE_CPU_OFFLOAD:
+         pipe.enable_model_cpu_offload()
+
+     return pipe
+
+ current_model_id = MODEL_OPTIONS["RealVisXL_V4.0_Lightning"]
+ pipe = load_model(current_model_id)
+
+ MAX_SEED = np.iinfo(np.int32).max
+
+ def save_image(img):
+     unique_name = str(uuid.uuid4()) + ".png"
+     img.save(unique_name)
+     return unique_name
+
+ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
+     if randomize_seed:
+         seed = random.randint(0, MAX_SEED)
+     return seed
+
+ @spaces.GPU(duration=60, enable_queue=True)
+ def generate(
+     model_choice: str,
+     prompt: str,
+     negative_prompt: str = "",
+     use_negative_prompt: bool = False,
+     seed: int = 1,
+     width: int = 1024,
+     height: int = 1024,
+     guidance_scale: float = 3,
+     num_inference_steps: int = 25,
+     randomize_seed: bool = False,
+     num_images: int = 1,
+     use_resolution_binning: bool = True,
+     progress=gr.Progress(track_tqdm=True),
+ ):
+     # Parameter order mirrors the Gradio inputs list below, so the 11th input
+     # (the "Number of Images" slider) is received as num_images rather than
+     # being swallowed by use_resolution_binning.
+     global pipe, current_model_id
+     # Reload the pipeline only when a different model is selected.
+     if MODEL_OPTIONS[model_choice] != current_model_id:
+         current_model_id = MODEL_OPTIONS[model_choice]
+         pipe = load_model(current_model_id)
+
+     seed = int(randomize_seed_fn(seed, randomize_seed))
+     generator = torch.Generator(device=device).manual_seed(seed)
+
+     options = {
+         "prompt": [prompt] * num_images,
+         "negative_prompt": [negative_prompt] * num_images if use_negative_prompt else None,
+         "width": width,
+         "height": height,
+         "guidance_scale": guidance_scale,
+         "num_inference_steps": num_inference_steps,
+         "generator": generator,
+         "output_type": "pil",
+     }
+
+     if use_resolution_binning:
+         options["use_resolution_binning"] = True
+
+     # Generate in batches of BATCH_SIZE to keep peak memory bounded.
+     images = []
+     for i in range(0, num_images, BATCH_SIZE):
+         batch_options = options.copy()
+         batch_options["prompt"] = options["prompt"][i:i + BATCH_SIZE]
+         if batch_options.get("negative_prompt") is not None:
+             batch_options["negative_prompt"] = options["negative_prompt"][i:i + BATCH_SIZE]
+         images.extend(pipe(**batch_options).images)
+
+     image_paths = [save_image(img) for img in images]
+     return image_paths, seed
+
+ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
+     gr.Markdown(DESCRIPTIONx)
+
+     with gr.Group():
+         with gr.Row():
+             model_choice = gr.Dropdown(
+                 label="Model",
+                 choices=list(MODEL_OPTIONS.keys()),
+                 value="RealVisXL_V4.0_Lightning"
+             )
+             prompt = gr.Text(
+                 label="Prompt",
+                 show_label=False,
+                 max_lines=1,
+                 placeholder="Enter your prompt",
+                 container=False,
+             )
+             run_button = gr.Button("Run", scale=0)
+         result = gr.Gallery(label="Result", columns=1, show_label=False)
+
+     with gr.Accordion("Advanced options", open=False, visible=False):
+         num_images = gr.Slider(
+             label="Number of Images",
+             minimum=1,
+             maximum=4,
+             step=1,
+             value=1,
+         )
+         with gr.Row():
+             use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
+             negative_prompt = gr.Text(
+                 label="Negative prompt",
+                 max_lines=5,
+                 lines=4,
+                 placeholder="Enter a negative prompt",
+                 value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
+                 visible=True,
+             )
+         seed = gr.Slider(
+             label="Seed",
+             minimum=0,
+             maximum=MAX_SEED,
+             step=1,
+             value=0,
+         )
+         randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+         with gr.Row(visible=True):
+             width = gr.Slider(
+                 label="Width",
+                 minimum=512,
+                 maximum=MAX_IMAGE_SIZE,
+                 step=64,
+                 value=1024,
+             )
+             height = gr.Slider(
+                 label="Height",
+                 minimum=512,
+                 maximum=MAX_IMAGE_SIZE,
+                 step=64,
+                 value=1024,
+             )
+         with gr.Row():
+             guidance_scale = gr.Slider(
+                 label="Guidance Scale",
+                 minimum=0.1,
+                 maximum=6,
+                 step=0.1,
+                 value=3.0,
+             )
+             num_inference_steps = gr.Slider(
+                 label="Number of inference steps",
+                 minimum=1,
+                 maximum=25,
+                 step=1,
+                 value=23,
+             )
+
+     gr.Examples(
+         examples=examples,
+         inputs=prompt,
+         cache_examples=False
+     )
+
+     use_negative_prompt.change(
+         fn=lambda x: gr.update(visible=x),
+         inputs=use_negative_prompt,
+         outputs=negative_prompt,
+         api_name=False,
+     )
+
+     gr.on(
+         triggers=[
+             prompt.submit,
+             negative_prompt.submit,
+             run_button.click,
+         ],
+         fn=generate,
+         inputs=[
+             model_choice,
+             prompt,
+             negative_prompt,
+             use_negative_prompt,
+             seed,
+             width,
+             height,
+             guidance_scale,
+             num_inference_steps,
+             randomize_seed,
+             num_images
+         ],
+         outputs=[result, seed],
+         api_name="run",
+     )
+
+     gr.Markdown(DESCRIPTIONy)
+     gr.Markdown("**Disclaimer:**")
+     gr.Markdown("This is a high-quality image generation demo space that produces images in a fraction of a second from highly detailed prompts. The space can still make mistakes, so use it wisely.")
+     gr.Markdown("**Note:**")
+     gr.Markdown("⚠️ Users are accountable for the content they generate and are responsible for ensuring it meets appropriate ethical standards.")
+
+ if __name__ == "__main__":
+     demo.queue(max_size=40).launch()
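
Because the generation event is registered with api_name="run", a deployed copy of this Space can also be driven programmatically. Below is a minimal, untested sketch using gradio_client; the Space ID is a placeholder (not part of this commit) and the positional arguments simply mirror the inputs list wired to gr.on above.

# Minimal sketch, assuming the Space is public and exposes /run as wired above.
# "user/space-name" is a hypothetical placeholder for the actual Space ID.
from gradio_client import Client

client = Client("user/space-name")
gallery, used_seed = client.predict(
    "RealVisXL_V4.0_Lightning",  # model_choice
    "Cold coffee in a cup bokeh --ar 85:128 --v 6.0 --style raw5, 4K",  # prompt
    "",      # negative_prompt
    False,   # use_negative_prompt
    0,       # seed
    1024,    # width
    1024,    # height
    3.0,     # guidance_scale
    23,      # num_inference_steps
    True,    # randomize_seed
    1,       # num_images
    api_name="/run",
)
print(gallery, used_seed)  # gallery entries reference the generated image files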