Update app.py
Changing "torch_dtype=torch.float16" to "torch_dtype=torch.get_default_dtype()" provides compatibility with the free CPU hardware on Huggingface.co.
The renders take hundreds of minutes on the CPU, and generating more than one image at a time causes a connection error.
The image quality is excellent.
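For context on why the change works: half precision is only practical on GPU, and several CPU operators have no float16 kernels, which is what made the float16 build fail on CPU-only hardware. Below is a minimal sketch of the idea, not code from app.py; the pick_dtype() helper is hypothetical and simply falls back to torch.get_default_dtype() (float32 unless the default has been changed) when CUDA is unavailable.

    import torch

    def pick_dtype() -> torch.dtype:
        # Half precision only pays off on GPU; many CPU kernels are not
        # implemented for float16, which crashes CPU-only Spaces.
        if torch.cuda.is_available():
            return torch.float16
        # torch.get_default_dtype() is torch.float32 unless it was changed
        # via torch.set_default_dtype(), so CPU inference stays in float32.
        return torch.get_default_dtype()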
app.py CHANGED
@@ -54,7 +54,7 @@ current_model_path = current_model.path
 if is_colab:
     pipe = StableDiffusionPipeline.from_pretrained(
         current_model.path,
-        torch_dtype=torch.float16,
+        torch_dtype=torch.get_default_dtype(),
        scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
        safety_checker=lambda images, clip_input: (images, False)
        )
@@ -62,7 +62,7 @@ if is_colab:
 else:
     pipe = StableDiffusionPipeline.from_pretrained(
         current_model.path,
-        torch_dtype=torch.float16,
+        torch_dtype=torch.get_default_dtype(),
        scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
        )

@@ -120,14 +120,14 @@ def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width,
     if is_colab or current_model == custom_model:
         pipe = StableDiffusionPipeline.from_pretrained(
             current_model_path,
-            torch_dtype=torch.float16,
+            torch_dtype=torch.get_default_dtype(),
            scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
            safety_checker=lambda images, clip_input: (images, False)
            )
     else:
         pipe = StableDiffusionPipeline.from_pretrained(
             current_model_path,
-            torch_dtype=torch.float16,
+            torch_dtype=torch.get_default_dtype(),
            scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
            )
     # pipe = pipe.to("cpu")
@@ -164,14 +164,14 @@ def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance
     if is_colab or current_model == custom_model:
         pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
             current_model_path,
-            torch_dtype=torch.float16,
+            torch_dtype=torch.get_default_dtype(),
            scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
            safety_checker=lambda images, clip_input: (images, False)
            )
     else:
         pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
             current_model_path,
-            torch_dtype=torch.float16,
+            torch_dtype=torch.get_default_dtype(),
            scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
            )
     # pipe = pipe.to("cpu")
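As a standalone usage sketch (not the Space's actual code): loading a pipeline the same way and rendering a single image on CPU, since the commit notes that queuing more than one image at a time triggers a connection error. The model id below is a placeholder assumption; the app itself uses current_model.path.

    import torch
    from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

    model_id = "runwayml/stable-diffusion-v1-5"  # placeholder; the app uses current_model.path

    pipe = StableDiffusionPipeline.from_pretrained(
        model_id,
        torch_dtype=torch.get_default_dtype(),  # float32 on a default setup, safe for CPU
        scheduler=DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler"),
    )
    pipe = pipe.to("cpu")

    # One image at a time with a modest step count: CPU renders are very slow.
    image = pipe("a lighthouse at dusk, watercolor",
                 num_inference_steps=20,
                 guidance_scale=7.5).images[0]
    image.save("out.png")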