multimodalart (HF staff) committed
Commit 1816d2d
1 Parent(s): 30960aa

Update app.py

Files changed (1)
  1. app.py +147 -134
app.py CHANGED
@@ -55,52 +55,73 @@ class calculateDuration:
         else:
             print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
 
-def update_selection(evt: gr.SelectData, width, height, selected_lora1, selected_lora2):
-    selected_lora = loras[evt.index]
-    new_placeholder = f"Type a prompt for {selected_lora['title']}"
+def update_selection(evt: gr.SelectData, selected_indices, width, height):
+    selected_index = evt.index
+    selected_indices = selected_indices or []
+    if selected_index in selected_indices:
+        # LoRA is already selected, remove it
+        selected_indices.remove(selected_index)
+    else:
+        if len(selected_indices) < 2:
+            selected_indices.append(selected_index)
+        else:
+            raise gr.Error("You can select up to 2 LoRAs only.")
 
     # Initialize outputs
-    outputs = []
+    selected_info_1 = ""
+    selected_info_2 = ""
+    if len(selected_indices) >= 1:
+        lora1 = loras[selected_indices[0]]
+        selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}](https://huggingface.co/{lora1['repo']}) ✨"
+    if len(selected_indices) >= 2:
+        lora2 = loras[selected_indices[1]]
+        selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}](https://huggingface.co/{lora2['repo']}) ✨"
 
-    if selected_lora1 is None:
-        selected_lora1 = selected_lora
-        selected_lora1_info = f"### LoRA 1 Selected: [{selected_lora1['title']}](https://huggingface.co/{selected_lora1['repo']}) ✨"
-        lora_scale1_visible = True
-        remove_lora1_visible = True
-    elif selected_lora2 is None:
-        selected_lora2 = selected_lora
-        selected_lora2_info = f"### LoRA 2 Selected: [{selected_lora2['title']}](https://huggingface.co/{selected_lora2['repo']}) ✨"
-        lora_scale2_visible = True
-        remove_lora2_visible = True
+    # Update prompt placeholder based on last selected LoRA
+    if selected_indices:
+        last_selected_lora = loras[selected_indices[-1]]
+        new_placeholder = f"Type a prompt for {last_selected_lora['title']}"
     else:
-        raise gr.Error("You can only select up to two LoRAs. Please remove one before selecting another.")
-
-    # Update placeholder
-    placeholder_update = gr.update(placeholder=new_placeholder)
-
-    # For width and height adjustment
-    if "aspect" in selected_lora:
-        if selected_lora["aspect"] == "portrait":
-            width = 768
-            height = 1024
-        elif selected_lora["aspect"] == "landscape":
-            width = 1024
-            height = 768
-        else:
-            width = 1024
-            height = 1024
+        new_placeholder = "Type a prompt after selecting a LoRA"
 
-    return placeholder_update, selected_lora1, selected_lora2, selected_lora1_info, selected_lora2_info, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True), width, height
+    return (
+        gr.update(placeholder=new_placeholder),
+        selected_info_1,
+        selected_info_2,
+        selected_indices,
+        width,
+        height,
+    )
 
-def remove_selected_lora1(selected_lora1, selected_lora1_info):
-    selected_lora1 = None
-    selected_lora1_info = ""
-    return selected_lora1, selected_lora1_info, gr.update(visible=False), gr.update(visible=False)
+def remove_lora_1(selected_indices):
+    selected_indices = selected_indices or []
+    if len(selected_indices) >= 1:
+        selected_indices.pop(0)
+    # Update selected_info_1 and selected_info_2
+    selected_info_1 = ""
+    selected_info_2 = ""
+    if len(selected_indices) >= 1:
+        lora1 = loras[selected_indices[0]]
+        selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}](https://huggingface.co/{lora1['repo']}) ✨"
+    if len(selected_indices) >= 2:
+        lora2 = loras[selected_indices[1]]
+        selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}](https://huggingface.co/{lora2['repo']}) ✨"
+    return selected_info_1, selected_info_2, selected_indices
 
-def remove_selected_lora2(selected_lora2, selected_lora2_info):
-    selected_lora2 = None
-    selected_lora2_info = ""
-    return selected_lora2, selected_lora2_info, gr.update(visible=False), gr.update(visible=False)
+def remove_lora_2(selected_indices):
+    selected_indices = selected_indices or []
+    if len(selected_indices) >= 2:
+        selected_indices.pop(1)
+    # Update selected_info_1 and selected_info_2
+    selected_info_1 = ""
+    selected_info_2 = ""
+    if len(selected_indices) >= 1:
+        lora1 = loras[selected_indices[0]]
+        selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}](https://huggingface.co/{lora1['repo']}) ✨"
+    if len(selected_indices) >= 2:
+        lora2 = loras[selected_indices[1]]
+        selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}](https://huggingface.co/{lora2['repo']}) ✨"
+    return selected_info_1, selected_info_2, selected_indices
 
 @spaces.GPU(duration=70)
 def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress):
@@ -115,6 +136,7 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
         width=width,
         height=height,
         generator=generator,
+        joint_attention_kwargs={"scale": 1.0},
         output_type="pil",
         good_vae=good_vae,
     ):
@@ -134,59 +156,54 @@ def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps
         width=width,
         height=height,
         generator=generator,
+        joint_attention_kwargs={"scale": 1.0},
         output_type="pil",
     ).images[0]
     return final_image
-
-def run_lora(prompt, image_input, image_strength, cfg_scale, steps, randomize_seed, seed, width, height, selected_lora1, selected_lora2, lora_scale1, lora_scale2, progress=gr.Progress(track_tqdm=True)):
-    if selected_lora1 is None and selected_lora2 is None:
-        raise gr.Error("You must select at least one LoRA before proceeding.")
 
-    # Build the prompt mash
-    prompt_mash = prompt
+def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, progress=gr.Progress(track_tqdm=True)):
+    if not selected_indices:
+        raise gr.Error("You must select at least one LoRA before proceeding.")
 
-    # Handle trigger words and positions
-    trigger_words = []
-    if selected_lora1 is not None:
-        trigger_word1 = selected_lora1.get("trigger_word", "")
-        if trigger_word1:
-            if selected_lora1.get("trigger_position") == "prepend":
-                trigger_words.insert(0, trigger_word1)
-            else:
-                trigger_words.append(trigger_word1)
-    if selected_lora2 is not None:
-        trigger_word2 = selected_lora2.get("trigger_word", "")
-        if trigger_word2:
-            if selected_lora2.get("trigger_position") == "prepend":
-                trigger_words.insert(0, trigger_word2)
+    selected_loras = [loras[idx] for idx in selected_indices]
+
+    # Build the prompt with trigger words
+    prompt_mash = prompt
+    for lora in selected_loras:
+        trigger_word = lora.get('trigger_word', '')
+        if trigger_word:
+            if lora.get("trigger_position") == "prepend":
+                prompt_mash = f"{trigger_word} {prompt_mash}"
             else:
-                trigger_words.append(trigger_word2)
-    # Combine trigger words with the prompt
-    if trigger_words:
-        prompt_mash = f"{' '.join(trigger_words)} {prompt}"
+                prompt_mash = f"{prompt_mash} {trigger_word}"
 
-    with calculateDuration("Unloading LoRAs"):
+    # Unload previous LoRA weights
+    with calculateDuration("Unloading LoRA"):
         pipe.unload_lora_weights()
        pipe_i2i.unload_lora_weights()
-
+
     # Load LoRA weights with respective scales
     with calculateDuration("Loading LoRA weights"):
-        if image_input is not None:
-            if selected_lora1 is not None:
-                pipe_i2i.load_lora_weights(selected_lora1['repo'], weight_name=selected_lora1.get('weights'), scale=lora_scale1)
-            if selected_lora2 is not None:
-                pipe_i2i.load_lora_weights(selected_lora2['repo'], weight_name=selected_lora2.get('weights'), scale=lora_scale2)
-        else:
-            if selected_lora1 is not None:
-                pipe.load_lora_weights(selected_lora1['repo'], weight_name=selected_lora1.get('weights'), scale=lora_scale1)
-            if selected_lora2 is not None:
-                pipe.load_lora_weights(selected_lora2['repo'], weight_name=selected_lora2.get('weights'), scale=lora_scale2)
-
+        for idx, lora in enumerate(selected_loras):
+            lora_path = lora['repo']
+            scale = lora_scale_1 if idx == 0 else lora_scale_2
+            if image_input is not None:
+                if "weights" in lora:
+                    pipe_i2i.load_lora_weights(lora_path, weight_name=lora["weights"], multiplier=scale)
+                else:
+                    pipe_i2i.load_lora_weights(lora_path, multiplier=scale)
+            else:
+                if "weights" in lora:
+                    pipe.load_lora_weights(lora_path, weight_name=lora["weights"], multiplier=scale)
+                else:
+                    pipe.load_lora_weights(lora_path, multiplier=scale)
+
     # Set random seed for reproducibility
     with calculateDuration("Randomizing seed"):
         if randomize_seed:
             seed = random.randint(0, MAX_SEED)
-
+
+    # Generate image
     if image_input is not None:
         final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, seed)
         yield final_image, seed, gr.update(visible=False)
@@ -196,37 +213,37 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, randomize_se
         final_image = None
         step_counter = 0
         for image in image_generator:
-            step_counter += 1
+            step_counter+=1
             final_image = image
             progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
             yield image, seed, gr.update(value=progress_bar, visible=True)
         yield final_image, seed, gr.update(value=progress_bar, visible=False)
-
+
 def get_huggingface_safetensors(link):
-    split_link = link.split("/")
-    if(len(split_link) == 2):
-        model_card = ModelCard.load(link)
-        base_model = model_card.data.get("base_model")
-        print(base_model)
-        if((base_model != "black-forest-labs/FLUX.1-dev") and (base_model != "black-forest-labs/FLUX.1-schnell")):
-            raise Exception("Not a FLUX LoRA!")
-        image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
-        trigger_word = model_card.data.get("instance_prompt", "")
-        image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None
-        fs = HfFileSystem()
-        try:
-            list_of_files = fs.ls(link, detail=False)
-            for file in list_of_files:
-                if(file.endswith(".safetensors")):
-                    safetensors_name = file.split("/")[-1]
-                if (not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp"))):
-                    image_elements = file.split("/")
-                    image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
-        except Exception as e:
-            print(e)
-            gr.Warning(f"You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
-            raise Exception(f"You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
-        return split_link[1], link, safetensors_name, trigger_word, image_url
+    split_link = link.split("/")
+    if(len(split_link) == 2):
+        model_card = ModelCard.load(link)
+        base_model = model_card.data.get("base_model")
+        print(base_model)
+        if((base_model != "black-forest-labs/FLUX.1-dev") and (base_model != "black-forest-labs/FLUX.1-schnell")):
+            raise Exception("Not a FLUX LoRA!")
+        image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
+        trigger_word = model_card.data.get("instance_prompt", "")
+        image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None
+        fs = HfFileSystem()
+        try:
+            list_of_files = fs.ls(link, detail=False)
+            for file in list_of_files:
+                if(file.endswith(".safetensors")):
+                    safetensors_name = file.split("/")[-1]
+                if (not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp"))):
+                    image_elements = file.split("/")
+                    image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
+        except Exception as e:
+            print(e)
+            gr.Warning(f"You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
+            raise Exception(f"You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
+        return split_link[1], link, safetensors_name, trigger_word, image_url
 
 def check_custom_model(link):
     if(link.startswith("https://")):
@@ -286,8 +303,8 @@ css = '''
 #title img{width: 100px; margin-right: 0.5em}
 #gallery .grid-wrap{height: 10vh}
 #lora_list{background: var(--block-background-fill);padding: 0 1em .3em; font-size: 90%}
-.custom_lora_card .card_internal{display: flex;height: 100px;margin-top: .5em}
-.custom_lora_card .card_internal img{margin-right: 1em}
+.card_internal{display: flex;height: 100px;margin-top: .5em}
+.card_internal img{margin-right: 1em}
 .styler{--form-gap-width: 0px !important}
 #progress{height:30px}
 #progress .generating{display:none}
@@ -299,11 +316,10 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css, delete_cache=(60, 3600)) as app:
         """<h1><img src="https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer/resolve/main/flux_lora.png" alt="LoRA"> LoRA Lab</h1>""",
         elem_id="title",
     )
-    selected_lora1 = gr.State(None)
-    selected_lora2 = gr.State(None)
+    selected_indices = gr.State([])
     with gr.Row():
         with gr.Column(scale=3):
-            prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Type a prompt after selecting LoRAs")
+            prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Type a prompt after selecting a LoRA")
         with gr.Column(scale=1, elem_id="gen_column"):
            generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn")
    with gr.Row():
@@ -320,21 +336,18 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css, delete_cache=(60, 3600)) as app:
                gr.Markdown("[Check the list of FLUX LoRas](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)", elem_id="lora_list")
            custom_lora_info = gr.HTML(visible=False)
            custom_lora_button = gr.Button("Remove custom LoRA", visible=False)
-            # Selected LoRAs section
-            gr.Markdown("### Selected LoRAs")
-            with gr.Row():
-                with gr.Column():
-                    selected_lora1_info = gr.Markdown("", visible=False)
-                    lora_scale1 = gr.Slider(label="LoRA 1 Scale", minimum=0, maximum=3, step=0.01, value=0.95, visible=False)
-                    remove_lora1_button = gr.Button("Remove LoRA 1", visible=False)
-                with gr.Column():
-                    selected_lora2_info = gr.Markdown("", visible=False)
-                    lora_scale2 = gr.Slider(label="LoRA 2 Scale", minimum=0, maximum=3, step=0.01, value=0.95, visible=False)
-                    remove_lora2_button = gr.Button("Remove LoRA 2", visible=False)
        with gr.Column():
            progress_bar = gr.Markdown(elem_id="progress",visible=False)
            result = gr.Image(label="Generated Image")
-
+            with gr.Row():
+                with gr.Column():
+                    selected_info_1 = gr.Markdown("")
+                    lora_scale_1 = gr.Slider(label="LoRA 1 Scale", minimum=0, maximum=3, step=0.01, value=0.95)
+                    remove_button_1 = gr.Button("Remove LoRA 1")
+                with gr.Column():
+                    selected_info_2 = gr.Markdown("")
+                    lora_scale_2 = gr.Slider(label="LoRA 2 Scale", minimum=0, maximum=3, step=0.01, value=0.95)
+                    remove_button_2 = gr.Button("Remove LoRA 2")
    with gr.Row():
        with gr.Accordion("Advanced Settings", open=False):
            with gr.Row():
@@ -352,35 +365,35 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css, delete_cache=(60, 3600)) as app:
                with gr.Row():
                    randomize_seed = gr.Checkbox(True, label="Randomize seed")
                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
-
+
    gallery.select(
        update_selection,
-        inputs=[width, height, selected_lora1, selected_lora2],
-        outputs=[prompt, selected_lora1, selected_lora2, selected_lora1_info, selected_lora2_info, lora_scale1, remove_lora1_button, lora_scale2, remove_lora2_button, width, height]
+        inputs=[selected_indices, width, height],
+        outputs=[prompt, selected_info_1, selected_info_2, selected_indices, width, height]
    )
-    remove_lora1_button.click(
-        remove_selected_lora1,
-        inputs=[selected_lora1, selected_lora1_info],
-        outputs=[selected_lora1, selected_lora1_info, lora_scale1, remove_lora1_button]
+    remove_button_1.click(
+        remove_lora_1,
+        inputs=[selected_indices],
+        outputs=[selected_info_1, selected_info_2, selected_indices]
    )
-    remove_lora2_button.click(
-        remove_selected_lora2,
-        inputs=[selected_lora2, selected_lora2_info],
-        outputs=[selected_lora2, selected_lora2_info, lora_scale2, remove_lora2_button]
+    remove_button_2.click(
+        remove_lora_2,
+        inputs=[selected_indices],
+        outputs=[selected_info_1, selected_info_2, selected_indices]
    )
    custom_lora.input(
        add_custom_lora,
        inputs=[custom_lora],
-        outputs=[custom_lora_info, custom_lora_button, gallery, selected_lora1_info, selected_lora2_info, prompt]
+        outputs=[custom_lora_info, custom_lora_button, gallery, selected_info_1, selected_indices, prompt]
    )
    custom_lora_button.click(
        remove_custom_lora,
-        outputs=[custom_lora_info, custom_lora_button, gallery, selected_lora1_info, selected_lora2_info, custom_lora]
+        outputs=[custom_lora_info, custom_lora_button, gallery, selected_info_1, selected_indices, custom_lora]
    )
    gr.on(
        triggers=[generate_button.click, prompt.submit],
        fn=run_lora,
-        inputs=[prompt, input_image, image_strength, cfg_scale, steps, randomize_seed, seed, width, height, selected_lora1, selected_lora2, lora_scale1, lora_scale2],
+        inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height],
        outputs=[result, seed, progress_bar]
    )
 
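For readers of this commit: the per-LoRA scaling that the new run_lora passes as a multiplier argument can also be expressed with diffusers' named-adapter API. The sketch below is not the committed code; it is a minimal illustration under stated assumptions (placeholder repo ids, weight file names and adapter names; a diffusers build with the PEFT backend available).

import torch
from diffusers import FluxPipeline

# Sketch only: load two LoRAs as named adapters and give each its own scale,
# instead of passing a per-call multiplier when loading the weights.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to("cuda")

# "user/flux-lora-one", the weight file names and the adapter names are placeholders.
pipe.load_lora_weights("user/flux-lora-one", weight_name="lora_one.safetensors", adapter_name="lora_one")
pipe.load_lora_weights("user/flux-lora-two", weight_name="lora_two.safetensors", adapter_name="lora_two")

# Independent scales, analogous to lora_scale_1 / lora_scale_2 in run_lora.
pipe.set_adapters(["lora_one", "lora_two"], adapter_weights=[0.95, 0.95])

image = pipe(
    "trigger_one trigger_two a cozy cabin in the woods",
    num_inference_steps=28,
    guidance_scale=3.5,
    joint_attention_kwargs={"scale": 1.0},  # same kwarg the commit adds to both pipeline calls
).images[0]

# Drop both adapters before loading a different combination, as run_lora does.
pipe.unload_lora_weights()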