prithivMLmods committed on
Commit 60631c2 (parent: 2401cf3)

Update app.py

Files changed (1): app.py (+12, -22)
app.py CHANGED
@@ -8,10 +8,9 @@ from PIL import Image
 import spaces
 import torch
 from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
-from diffusers import AuraFlowPipeline
 
 css = '''
-.gradio-container{max-width: 600px !important}
+.gradio-container{max-width: 570px !important}
 h1{text-align:center}
 footer {
     visibility: hidden
@@ -29,7 +28,7 @@ examples = [
 MODEL_OPTIONS = {
     "Lightning": "SG161222/RealVisXL_V4.0_Lightning",
     "Turbovision": "SG161222/RealVisXL_V3.0_Turbo",
-    "AuraFlow": "fal/AuraFlow"
+
 }
 
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
@@ -40,19 +39,13 @@ BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
 def load_and_prepare_model(model_id):
-    if model_id == "fal/AuraFlow":
-        pipe = AuraFlowPipeline.from_pretrained(
-            model_id,
-            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
-        ).to(device)
-    else:
-        pipe = StableDiffusionXLPipeline.from_pretrained(
-            model_id,
-            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
-            use_safetensors=True,
-            add_watermarker=False,
-        ).to(device)
-        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+    pipe = StableDiffusionXLPipeline.from_pretrained(
+        model_id,
+        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+        use_safetensors=True,
+        add_watermarker=False,
+    ).to(device)
+    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
 
     if USE_TORCH_COMPILE:
         pipe.compile()
@@ -141,9 +134,6 @@ def load_predefined_images():
     ]
     return predefined_images
 
-
-
-
 with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
     with gr.Row():
         prompt = gr.Text(
@@ -160,7 +150,7 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
         model_choice = gr.Dropdown(
             label="Model Selection",
             choices=list(MODEL_OPTIONS.keys()),
-            value="AuraFlow"
+            value="Lightning"
        )
 
        with gr.Accordion("Advanced options", open=True, visible=False):
@@ -264,7 +254,7 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
 
    with gr.Column(scale=3):
        gr.Markdown("### Image Gallery")
-        predefined_gallery = gr.Gallery(label="Image Gallery", columns=4, show_label=False, value=load_predefined_images())
+        predefined_gallery = gr.Gallery(label="Image Gallery", columns=4, show_label=False, value=load_predefined_images())
 
 if __name__ == "__main__":
-    demo.queue(max_size=20).launch(show_api=False)
+    demo.queue(max_size=20).launch(show_api=False)
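
For reference, below is a minimal, self-contained sketch of the loading path this commit leaves in place: every model choice now goes through StableDiffusionXLPipeline with the Euler Ancestral scheduler swapped in. It assumes only that torch and diffusers are installed; the return statement, the example checkpoint, and the sampling parameters are illustrative additions and are not taken from app.py.

# Minimal sketch of the post-commit loading path (illustrative, not app.py as committed).
import torch
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

def load_and_prepare_model(model_id):
    # Both remaining options ("Lightning", "Turbovision") are SDXL checkpoints,
    # so a single StableDiffusionXLPipeline code path suffices.
    pipe = StableDiffusionXLPipeline.from_pretrained(
        model_id,
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        use_safetensors=True,
        add_watermarker=False,
    ).to(device)
    # Replace the checkpoint's default scheduler with Euler Ancestral,
    # reusing the existing scheduler configuration.
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
    return pipe  # assumption: app.py continues with the prepared pipeline here

if __name__ == "__main__":
    pipe = load_and_prepare_model("SG161222/RealVisXL_V4.0_Lightning")
    image = pipe(
        prompt="a photo of an astronaut riding a horse on the moon",
        num_inference_steps=8,  # low step count typical for Lightning-style checkpoints
        guidance_scale=2.0,     # illustrative value, not taken from app.py
    ).images[0]
    image.save("sample.png")

With the AuraFlowPipeline branch removed, the dropdown default also moves from "AuraFlow" to "Lightning", the first of the two remaining SDXL checkpoints.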