rafaaa2105 committed on
Commit
c8f91a3
1 Parent(s): e728b1c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -11
app.py CHANGED
@@ -7,10 +7,10 @@ import spaces
7
  model_list = [model.strip() for model in os.environ.get("MODELS").split(",")]
8
  lora_list = [model.strip() for model in os.environ.get("LORAS").split(",")]
9
 
 
10
  models = {}
11
  for model_name in model_list:
12
  try:
13
- print("Loading " + model_name)
14
  models[model_name] = DiffusionPipeline.from_pretrained(model_name).to("cuda")
15
  except Exception as e:
16
  print(f"Error loading model {model_name}: {e}")
@@ -18,18 +18,16 @@ for model_name in model_list:
18
  @spaces.GPU
19
  def generate_images(model_name, prompt, negative_prompt, num_inference_steps, guidance_scale, num_images=4):
20
  if model_name not in models:
21
- try:
22
- models[model_name] = DiffusionPipeline.from_pretrained(model_name).to("cuda")
23
- except Exception as e:
24
- print(f"Error loading model {model_name}: {e}")
25
- return []
26
  pipe = models[model_name]
27
  outputs = []
 
28
  for _ in range(num_images):
29
  output = pipe(prompt, negative_prompt=negative_prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale)["images"][0]
30
  outputs.append(output)
31
- return outputs
32
 
 
33
 
34
  # Create the Gradio blocks
35
  with gr.Blocks() as demo:
@@ -43,11 +41,12 @@ with gr.Blocks() as demo:
43
  num_inference_steps = gr.Slider(minimum=10, maximum=50, step=1, value=25, label="Number of Inference Steps")
44
  guidance_scale = gr.Slider(minimum=1, maximum=20, step=0.5, value=7.5, label="Guidance Scale")
45
  num_images = gr.Slider(minimum=1, maximum=4, step=1, value=4, label="Number of Images")
 
46
  generate_btn = gr.Button("Generate Image")
47
-
48
- with gr.Column():
49
- output_gallery = gr.Gallery(label="Generated Images", height=480, scale=1)
50
 
51
  generate_btn.click(generate_images, inputs=[model_dropdown, prompt, negative_prompt, num_inference_steps, guidance_scale, num_images], outputs=output_gallery)
52
 
53
- demo.launch()
 
7
  model_list = [model.strip() for model in os.environ.get("MODELS").split(",")]
8
  lora_list = [model.strip() for model in os.environ.get("LORAS").split(",")]
9
 
10
+ # Load all models upfront
11
  models = {}
12
  for model_name in model_list:
13
  try:
 
14
  models[model_name] = DiffusionPipeline.from_pretrained(model_name).to("cuda")
15
  except Exception as e:
16
  print(f"Error loading model {model_name}: {e}")
 
18
  @spaces.GPU
19
  def generate_images(model_name, prompt, negative_prompt, num_inference_steps, guidance_scale, num_images=4):
20
  if model_name not in models:
21
+ return []
22
+
 
 
 
23
  pipe = models[model_name]
24
  outputs = []
25
+
26
  for _ in range(num_images):
27
  output = pipe(prompt, negative_prompt=negative_prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale)["images"][0]
28
  outputs.append(output)
 
29
 
30
+ return outputs
31
 
32
  # Create the Gradio blocks
33
  with gr.Blocks() as demo:
 
41
  num_inference_steps = gr.Slider(minimum=10, maximum=50, step=1, value=25, label="Number of Inference Steps")
42
  guidance_scale = gr.Slider(minimum=1, maximum=20, step=0.5, value=7.5, label="Guidance Scale")
43
  num_images = gr.Slider(minimum=1, maximum=4, step=1, value=4, label="Number of Images")
44
+
45
  generate_btn = gr.Button("Generate Image")
46
+
47
+ with gr.Column():
48
+ output_gallery = gr.Gallery(label="Generated Images", height=480, scale=1)
49
 
50
  generate_btn.click(generate_images, inputs=[model_dropdown, prompt, negative_prompt, num_inference_steps, guidance_scale, num_images], outputs=output_gallery)
51
 
52
+ demo.launch()