taki0112 committed on
Commit
9d41154
1 Parent(s): eeb7e29
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -22,7 +22,6 @@ def memory_efficient(model):
22
  model.to(device)
23
  except Exception as e:
24
  print("Error moving model to device:", e)
25
-
26
  try:
27
  model.enable_model_cpu_offload()
28
  except AttributeError:
@@ -31,11 +30,12 @@ def memory_efficient(model):
31
  model.enable_vae_slicing()
32
  except AttributeError:
33
  print("enable_vae_slicing is not supported.")
34
- if device == 'cuda':
35
- try:
36
- model.enable_xformers_memory_efficient_attention()
37
- except AttributeError:
38
- print("enable_xformers_memory_efficient_attention is not supported.")
 
39
 
40
  model = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch_dtype)
41
  print("SDXL")
 
22
  model.to(device)
23
  except Exception as e:
24
  print("Error moving model to device:", e)
 
25
  try:
26
  model.enable_model_cpu_offload()
27
  except AttributeError:
 
30
  model.enable_vae_slicing()
31
  except AttributeError:
32
  print("enable_vae_slicing is not supported.")
33
+
34
+ # if device == 'cuda':
35
+ # try:
36
+ # model.enable_xformers_memory_efficient_attention()
37
+ # except AttributeError:
38
+ # print("enable_xformers_memory_efficient_attention is not supported.")
39
 
40
  model = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch_dtype)
41
  print("SDXL")