multimodalart (HF staff) committed
Commit 9b729f7
1 Parent(s): e97bdf7

Update app.py

Files changed (1)
  1. app.py  +9 -11
app.py CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
 from time import sleep
 from diffusers import DiffusionPipeline
 from huggingface_hub import hf_hub_download
+from safetensors.torch import load_file

 import torch
 import json
@@ -28,12 +29,11 @@ with open(lora_list, "r") as file:
         for item in data
     ]

-saved_names = [
-    hf_hub_download(item["repo"], item["weights"]) for item in sdxl_loras
-]
-
-for item, saved_name in zip(sdxl_loras, saved_names):
+for item in sdxl_loras:
+    saved_name = hf_hub_download(item["repo"], item["weights"])
     item["saved_name"] = saved_name
+    state_dict = load_file(saved_name)
+    item["state_dict"] = {k: v.to(device="cuda", dtype=torch.float16) for k, v in state_dict.items() if torch.is_tensor(v)}

 css = '''
 #title{text-align:center;}
@@ -49,23 +49,21 @@ css = '''
 }
 '''

-#@spaces.GPU
 pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16)
 original_pipe = copy.deepcopy(pipe)

 def merge_and_run(prompt, negative_prompt, shuffled_items, lora_1_scale=0.5, lora_2_scale=0.5, progress=gr.Progress(track_tqdm=True)):
     pipe = copy.deepcopy(original_pipe)
-    pipe.to("cuda")
-    print("Loading LoRAs")
-    pipe.load_lora_weights(shuffled_items[0]['saved_name'])
+    pipe.to("cuda")
+    pipe.load_lora_weights(shuffled_items[0]['state_dict'])
     pipe.fuse_lora(lora_1_scale)
-    pipe.load_lora_weights(shuffled_items[1]['saved_name'])
+    pipe.load_lora_weights(shuffled_items[1]['state_dict'])
     pipe.fuse_lora(lora_2_scale)

     if negative_prompt == "":
         negative_prompt = False

-    image = pipe(prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=25).images[0]
+    image = pipe(prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=22, width=768, height=768).images[0]
     del pipe
     gc.collect()
     torch.cuda.empty_cache()
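
In effect, the commit stops handing load_lora_weights a downloaded file path and instead keeps each LoRA resident on the GPU as an fp16 state dict, passing that dict directly at request time. Below is a minimal standalone sketch of that flow, reduced to a single LoRA; the LoRA repo, weight filename, and prompt are illustrative placeholders, not taken from the app.

    # Sketch only: same loading pattern as the updated app.py, for one LoRA.
    import torch
    from diffusers import DiffusionPipeline
    from huggingface_hub import hf_hub_download
    from safetensors.torch import load_file

    pipe = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
    ).to("cuda")

    # Download once, then keep the weights in memory as an fp16 CUDA state dict.
    lora_path = hf_hub_download("some-user/some-sdxl-lora", "lora.safetensors")  # placeholder repo/file
    state_dict = {
        k: v.to(device="cuda", dtype=torch.float16)
        for k, v in load_file(lora_path).items()
        if torch.is_tensor(v)
    }

    # load_lora_weights accepts an in-memory state dict as well as a path,
    # so applying the LoRA needs no further file I/O.
    pipe.load_lora_weights(state_dict)
    pipe.fuse_lora(lora_scale=0.8)

    image = pipe(
        "a watercolor fox",  # placeholder prompt
        num_inference_steps=22,
        width=768,
        height=768,
    ).images[0]
    image.save("fox.png")

Pre-materializing the tensors on the GPU trades some VRAM for lower per-request latency, which appears to be the point of moving the load_file step out of merge_and_run and into the one-time setup loop.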