multimodalart committed
Commit 1d5c6d0
Parent(s): 1475e41

Update app.py

Files changed (1)
  1. app.py +7 -4
app.py CHANGED
@@ -14,7 +14,7 @@ with open(lora_list, "r") as file:
     data = json.load(file)
     sdxl_loras = [
         {
-            "image": item["image"],
+            "image": item["image"] if item["image"].startswith("https://") else f"https://huggingface.co/spaces/multimodalart/LoraTheExplorer/resolve/main/{item['image']}",
             "title": item["title"],
             "repo": item["repo"],
             "trigger_word": item["trigger_word"],
@@ -46,16 +46,19 @@ original_pipe = copy.deepcopy(pipe)
 
 #@spaces.GPU
 def merge_and_run(prompt, negative_prompt, shuffled_items, lora_1_scale=0.5, lora_2_scale=0.5, progress=gr.Progress(track_tqdm=True)):
+    print("Copying pipe")
     pipe = copy.deepcopy(original_pipe)
+    print("Loading LoRAs")
     pipe.load_lora_weights(shuffled_items[0]['repo'], weight_name=shuffled_items[0]['weights'])
     pipe.fuse_lora(lora_1_scale)
     pipe.load_lora_weights(shuffled_items[1]['repo'], weight_name=shuffled_items[1]['weights'])
     pipe.fuse_lora(lora_2_scale)
-
+
     pipe.to(torch_dtype=torch.float16)
     pipe.to("cuda")
     if negative_prompt == "":
         negative_prompt = False
+    print("Running inference")
     image = pipe(prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=25, guidance_scale=7).images[0]
     return image
 
@@ -86,12 +89,12 @@ with gr.Blocks(css=css) as demo:
     )
     with gr.Row():
         with gr.Column(min_width=10, scale=6):
-            lora_1 = gr.Image(interactive=False, height=350)
+            lora_1 = gr.Image(interactive=False, height=300)
             lora_1_prompt = gr.Markdown()
         with gr.Column(min_width=10, scale=1, elem_id="plus_column"):
             plus = gr.HTML("+", elem_id="plus_button")
         with gr.Column(min_width=10, scale=6):
-            lora_2 = gr.Image(interactive=False, height=350)
+            lora_2 = gr.Image(interactive=False, height=300)
             lora_2_prompt = gr.Markdown()
     with gr.Row():
         prompt = gr.Textbox(label="Your prompt", info="arrange the trigger words of the two LoRAs in a coherent sentence", interactive=True, elem_id="prompt")
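
The new "image" expression normalizes the gallery thumbnails: values that are already absolute https:// URLs pass through unchanged, while bare filenames are resolved against the Space's own repository via resolve/main. A minimal standalone sketch of that logic follows; the helper name and the example filename are illustrative, not part of the commit.

SPACE_BASE = "https://huggingface.co/spaces/multimodalart/LoraTheExplorer/resolve/main/"

def resolve_image_url(image: str) -> str:
    # Absolute URLs are kept as-is; relative paths are served from the Space repo.
    return image if image.startswith("https://") else f"{SPACE_BASE}{image}"

# Example (illustrative filename):
# resolve_image_url("pixel_art.png")
# -> "https://huggingface.co/spaces/multimodalart/LoraTheExplorer/resolve/main/pixel_art.png"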
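
For reference, merge_and_run (now with progress prints) boils down to: deep-copy a clean base pipeline, load and fuse each of the two picked LoRAs at its own scale, then move the fused pipeline to the GPU and run inference. Below is a minimal sketch of that flow with diffusers; the SDXL base checkpoint, the placeholder LoRA entries, and the explicit lora_scale= keyword are assumptions for illustration (the app passes the scale positionally), not code taken verbatim from the Space.

import copy
import torch
from diffusers import StableDiffusionXLPipeline

# Assumed SDXL base model; the app keeps one clean copy around as `original_pipe`.
original_pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)

def fuse_two_loras(lora_1, lora_2, lora_1_scale=0.5, lora_2_scale=0.5):
    # Start from a fresh copy so LoRAs fused for a previous request do not accumulate.
    pipe = copy.deepcopy(original_pipe)
    # Load each LoRA (placeholder dicts with "repo" and "weights" keys) and fuse it at its own scale.
    pipe.load_lora_weights(lora_1["repo"], weight_name=lora_1["weights"])
    pipe.fuse_lora(lora_scale=lora_1_scale)
    pipe.load_lora_weights(lora_2["repo"], weight_name=lora_2["weights"])
    pipe.fuse_lora(lora_scale=lora_2_scale)
    return pipe.to("cuda")

# Usage sketch:
# pipe = fuse_two_loras({"repo": "...", "weights": "..."}, {"repo": "...", "weights": "..."})
# image = pipe(prompt="...", num_inference_steps=25, guidance_scale=7).images[0]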