multimodalart (HF staff) committed
Commit 3eb8dac
1 Parent(s): 58682f7

Update app.py

Files changed (1)
  1. app.py +11 -6
app.py CHANGED
@@ -7,6 +7,7 @@ import torch
 import json
 import random
 import copy
+import gc

 lora_list = hf_hub_download(repo_id="multimodalart/LoraTheExplorer", filename="sdxl_loras.json", repo_type="space")

@@ -45,21 +46,25 @@ css = '''
 '''

 #@spaces.GPU
+pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16)
+original_pipe = copy.deepcopy(pipe)
+
 def merge_and_run(prompt, negative_prompt, shuffled_items, lora_1_scale=0.5, lora_2_scale=0.5, progress=gr.Progress(track_tqdm=True)):
-    pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16)
-    pipe.to(torch_dtype=torch.float16)
-    pipe.to("cuda")
+    pipe = copy.deepcopy(original_pipe)
+    pipe.to(device)
     print("Loading LoRAs")
     pipe.load_lora_weights(shuffled_items[0]['saved_name'])
     pipe.fuse_lora(lora_1_scale)
     pipe.load_lora_weights(shuffled_items[1]['saved_name'])
     pipe.fuse_lora(lora_2_scale)

-
     if negative_prompt == "":
         negative_prompt = False
-    print("Running inference")
-    image = pipe(prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=25, guidance_scale=7).images[0]
+
+    image = pipe(prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=25).images[0]
+    del pipe
+    gc.collect()
+    torch.cuda.empty_cache()
     return image

 def get_description(item):
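
Purely as an illustration of the pattern this commit applies, not code taken from the Space: the base SDXL pipeline is now loaded once at module level, a fresh deep copy is made for each request so LoRA weights fused in a previous call cannot accumulate, two LoRAs are fused at their requested scales, and the per-request copy is discarded afterward to release VRAM. The sketch below assumes hypothetical names (run_with_two_loras, lora_a_path, lora_b_path); the real app pulls LoRA paths from shuffled_items and moves the copy to a device variable defined elsewhere in app.py, which this diff does not show, so the sketch hardcodes "cuda" the way the removed code did.

import copy
import gc

import torch
from diffusers import DiffusionPipeline

# Load the base SDXL pipeline once and keep it pristine; clone it per request.
base_pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)

def run_with_two_loras(prompt, lora_a_path, lora_b_path, scale_a=0.5, scale_b=0.5):
    # Work on a fresh copy so previously fused LoRA weights never leak into this call.
    pipe = copy.deepcopy(base_pipe)
    pipe.to("cuda")

    # Fuse each LoRA into the base weights at its own scale, as the commit does.
    pipe.load_lora_weights(lora_a_path)
    pipe.fuse_lora(scale_a)
    pipe.load_lora_weights(lora_b_path)
    pipe.fuse_lora(scale_b)

    image = pipe(prompt=prompt, num_inference_steps=25).images[0]

    # Drop the per-request copy and return its VRAM to the pool.
    del pipe
    gc.collect()
    torch.cuda.empty_cache()
    return image

Deep-copying from a CPU-resident original is the simple way to "unfuse" here: instead of trying to subtract the fused LoRA deltas back out, every request starts from untouched base weights, at the cost of one copy per call.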