patrickvonplaten committed "up" (commit e331aa7, parent 217997c)

Files changed:
- app.py +34 -196
- convert.py +79 -0
- requirements.txt +7 -10
app.py CHANGED
@@ -1,198 +1,36 @@
-import os
-import subprocess
-from huggingface_hub import HfApi, upload_folder
 import gradio as gr
-import hf_utils
-import utils
-from safetensors import safe_open
-import torch
 
-
-def convert_and_push(radio_model_names, input_model, ckpt_name, sd_version, token, path_in_repo, ema, safetensors):
-    extract_ema = ema == "ema"
-
-    if sd_version == None:
-        return error_str("You must select a stable diffusion version.", title="Invalid input")
-
-    model_id = url_to_model_id(input_model) if radio_model_names == "Other" else radio_model_names
-
-    try:
-        model_id = url_to_model_id(model_id)
-
-        # 1. Download the checkpoint file
-        ckpt_path, revision = hf_utils.download_file(repo_id=model_id, filename=ckpt_name, token=token)
-
-        if safetensors == "yes":
-            tensors = {}
-            with safe_open(ckpt_path, framework="pt", device="cpu") as f:
-                for key in f.keys():
-                    tensors[key] = f.get_tensor(key)
-
-            new_checkpoint_path = "/".join(ckpt_path.split("/")[:-1] + ["model_safe.ckpt"])
-            torch.save(tensors, new_checkpoint_path)
-            ckpt_path = new_checkpoint_path
-            print("Converting ckpt_path", ckpt_path)
-
-        print(ckpt_path)
-
-        # 2. Run the conversion script
-        os.makedirs(model_id, exist_ok=True)
-        run_command = [
-            "python3",
-            "./diffs/scripts/convert_original_stable_diffusion_to_diffusers.py",
-            "--checkpoint_path",
-            ckpt_path,
-            "--dump_path",
-            model_id,
-        ]
-        if extract_ema:
-            run_command.append("--extract_ema")
-        subprocess.run(run_command)
-
-        # 3. Push to the model repo
-        commit_message = "Add Diffusers weights"
-        upload_folder(
-            folder_path=model_id,
-            repo_id=model_id,
-            path_in_repo=path_in_repo,
-            token=token,
-            create_pr=True,
-            commit_message=commit_message,
-            commit_description=f"Add Diffusers weights converted from checkpoint `{ckpt_name}` in revision {revision}",
-        )
-
-        # 4. Delete the downloaded checkpoint file, yaml files, and the converted model folder
-        hf_utils.delete_file(revision)
-        subprocess.run(["rm", "-rf", model_id.split('/')[0]])
-        import glob
-        for f in glob.glob("*.yaml*"):
-            subprocess.run(["rm", "-rf", f])
-
-        return f"""Successfully converted the checkpoint and opened a PR to add the weights to the model repo.
-You can view and merge the PR [here]({hf_utils.get_pr_url(HfApi(token=token), model_id, commit_message)})."""
-
-        return "Done"
-
-    except Exception as e:
-        return error_str(e)
-
-
-DESCRIPTION = """### Convert a stable diffusion checkpoint to Diffusers🧨
-With this space, you can easily convert a CompVis stable diffusion checkpoint to Diffusers and automatically create a pull request to the model repo.
-You can choose to convert a checkpoint from one of your own models, or from any other model on the Hub.
-You can skip the queue by running the app in the colab: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/gist/qunash/f0f3152c5851c0c477b68b7b98d547fe/convert-sd-to-diffusers.ipynb)"""
-
-with gr.Blocks() as demo:
-
-    gr.Markdown(DESCRIPTION)
-    with gr.Row():
-
-        with gr.Column(scale=11):
-            with gr.Column():
-                gr.Markdown("## 1. Load model info")
-                input_token = gr.Textbox(
-                    max_lines=1,
-                    type="password",
-                    label="Enter your Hugging Face token",
-                    placeholder="READ permission is sufficient"
-                )
-                gr.Markdown("You can get a token [here](https://huggingface.co/settings/tokens)")
-                with gr.Group(visible=False) as group_model:
-                    radio_model_names = gr.Radio(label="Choose a model")
-                    input_model = gr.Textbox(
-                        max_lines=1,
-                        label="Model name or URL",
-                        placeholder="username/model_name",
-                        visible=False,
-                    )
-
-                btn_get_ckpts = gr.Button("Load", visible=False)
-
-        with gr.Column(scale=10):
-            with gr.Column(visible=False) as group_convert:
-                gr.Markdown("## 2. Convert to Diffusers🧨")
-                radio_ckpts = gr.Radio(label="Choose the checkpoint to convert", visible=False)
-                path_in_repo = gr.Textbox(label="Path where the weights will be saved", placeholder="Leave empty for root folder")
-                ema = gr.Radio(label="Extract EMA or non-EMA?", choices=["ema", "non-ema"])
-                safetensors = gr.Radio(label="Extract from safetensors", choices=["yes", "no"], value="no")
-                radio_sd_version = gr.Radio(label="Choose the model version", choices=["v1", "v2", "v2.1"])
-                gr.Markdown("Conversion may take a few minutes.")
-                btn_convert = gr.Button("Convert & Push")
-
-            error_output = gr.Markdown(label="Output")
-
-    input_token.change(
-        fn=on_token_change,
-        inputs=input_token,
-        outputs=[group_model, radio_model_names, btn_get_ckpts, error_output],
-        queue=False,
-        scroll_to_output=True)
-
-    radio_model_names.change(
-        lambda x: gr.update(visible=x == "Other"),
-        inputs=radio_model_names,
-        outputs=input_model,
-        queue=False,
-        scroll_to_output=True)
-
-    btn_get_ckpts.click(
-        fn=get_ckpt_names,
-        inputs=[input_token, radio_model_names, input_model],
-        outputs=[error_output, radio_ckpts, group_convert],
-        scroll_to_output=True,
-        queue=False
-    )
-
-    btn_convert.click(
-        fn=convert_and_push,
-        inputs=[radio_model_names, input_model, radio_ckpts, radio_sd_version, input_token, path_in_repo, ema, safetensors],
-        outputs=error_output,
-        scroll_to_output=True
-    )
-
-    # gr.Markdown("""<img src="https://raw.githubusercontent.com/huggingface/diffusers/main/docs/source/imgs/diffusers_library.jpg" width="150"/>""")
-    gr.HTML("""
-    <div style="border-top: 1px solid #303030;">
-    <br>
-    <p>Space by: <a href="https://twitter.com/hahahahohohe"><img src="https://img.shields.io/twitter/follow/hahahahohohe?label=%40anzorq&style=social" alt="Twitter Follow"></a></p><br>
-    <a href="https://www.buymeacoffee.com/anzorq" target="_blank"><img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png" alt="Buy Me A Coffee" style="height: 45px !important;width: 162px !important;" ></a><br><br>
-    <p><img src="https://visitor-badge.glitch.me/badge?page_id=anzorq.sd-to-diffusers" alt="visitors"></p>
-    </div>
-    """)
-
-demo.queue()
-demo.launch(debug=True, share=utils.is_google_colab())
+
+from convert import convert
+
+DESCRIPTION = """
+The steps are the following:
+
+- Paste a read-access token from hf.co/settings/tokens. Read access is enough given that we will open a PR against the source repo.
+- Input a model id from the Hub
+- Input the filename from the root dir of the repo that you would like to convert, e.g. 'v2-1_768-ema-pruned.ckpt' or 'v1-5-pruned.safetensors'
+- Chose which Stable Diffusion version, image size, scheduler type the model has and whether you want the "ema", or "non-ema" weights.
+- Click "Submit"
+- That's it! You'll get feedback if it works or not, and if it worked, you'll get the URL of the opened PR 🔥
+
+⚠️ For now only `pytorch_model.bin` files are supported but we'll extend in the future.
+"""
+
+demo = gr.Interface(
+    title="Convert any model to Safetensors and open a PR",
+    description=DESCRIPTION,
+    allow_flagging="never",
+    article="Check out the [Safetensors repo on GitHub](https://github.com/huggingface/safetensors)",
+    inputs=[
+        gr.Text(max_lines=1, label="your_hf_token"),
+        gr.Text(max_lines=1, label="model_id"),
+        gr.Text(max_lines=1, label="filename"),
+        gr.Radio(label="Model type", choices=["v1", "v2.0", "v2.1"]),
+        gr.Radio(label="Sample size (px)", choices=[512, 768]),
+        gr.Radio(label="Scheduler type", choices=["pndm", "heun", "euler", "dpm", "ddim"], value="dpm"),
+        gr.Radio(label="Extract EMA or non-EMA?", choices=["ema", "non-ema"], value="ema"),
+    ],
+    outputs=[gr.Markdown(label="output")],
+    fn=convert,
+).queue(max_size=10, concurrency_count=1)
+
+demo.launch(show_api=True)
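Since the new app.py is a thin `gr.Interface` wrapper, the same conversion can be exercised directly from Python through the `convert` function added in convert.py below. A minimal sketch, assuming a valid read token; the model id and filename are placeholders:

from convert import convert

# Hypothetical direct call, bypassing the Gradio UI.
pr_url = convert(
    token="hf_...",                              # read-access token (placeholder)
    model_id="runwayml/stable-diffusion-v1-5",   # placeholder repo on the Hub
    filename="v1-5-pruned.ckpt",                 # checkpoint file in the repo's root dir
    model_type="v1",
    sample_size=512,
    scheduler_type="pndm",
    extract_ema=True,
)
print(pr_url)  # URL returned by api.upload_folder(..., create_pr=True)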
convert.py ADDED
@@ -0,0 +1,79 @@
+import argparse
+import requests
+import json
+import os
+import shutil
+from collections import defaultdict
+from inspect import signature
+from tempfile import TemporaryDirectory
+from typing import Dict, List, Optional, Set
+
+import torch
+from io import BytesIO
+
+from huggingface_hub import CommitInfo, Discussion, HfApi, hf_hub_download
+from huggingface_hub.file_download import repo_folder_name
+from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
+from transformers import CONFIG_MAPPING
+
+
+COMMIT_MESSAGE = " This PR adds the both fp32 and fp16 in PyTorch and safetensors format to {}"
+
+
+def convert_single(model_id: str, filename: str, model_type: str, sample_size: int, scheduler_type: str, extract_ema: bool, folder: str):
+    from_safetensors = filename.endswith(".safetensors")
+
+    local_file = os.path.join(model_id, filename)
+    ckpt_file = local_file if os.path.isfile(local_file) else hf_hub_download(repo_id=model_id, filename=filename)
+
+    if model_type == "v1":
+        config_url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
+    elif model_type == "v2.0":
+        config_url = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference.yaml"
+    elif model_type == "v2.1":
+        config_url = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml"
+
+    config_file = BytesIO(requests.get(config_url).content)
+
+    pipeline = download_from_original_stable_diffusion_ckpt(ckpt_file, config_file, image_size=sample_size, scheduler_type=scheduler_type, from_safetensors=from_safetensors, extract_ema=extract_ema)
+
+    pipeline.save_pretrained(folder)
+    pipeline.save_pretrained(folder, safe_serialization=True)
+
+    pipeline = pipeline.to(torch_dtype=torch.float16)
+    pipeline.save_pretrained(folder, variant="fp16")
+    pipeline.save_pretrained(folder, safe_serialization=True, variant="fp16")
+
+    return folder
+
+
+def previous_pr(api: "HfApi", model_id: str, pr_title: str) -> Optional["Discussion"]:
+    try:
+        discussions = api.get_repo_discussions(repo_id=model_id)
+    except Exception:
+        return None
+    for discussion in discussions:
+        if discussion.status == "open" and discussion.is_pull_request and discussion.title == pr_title:
+            details = api.get_discussion_details(repo_id=model_id, discussion_num=discussion.num)
+            if details.target_branch == "refs/heads/main":
+                return discussion
+
+
+def convert(token: str, model_id: str, filename: str, model_type: str, sample_size: int = 512, scheduler_type: str = "pndm", extract_ema: bool = True):
+    api = HfApi()
+
+    pr_title = "Adding `diffusers` weights of this model"
+
+    with TemporaryDirectory() as d:
+        folder = os.path.join(d, repo_folder_name(repo_id=model_id, repo_type="models"))
+        os.makedirs(folder)
+        new_pr = None
+        try:
+            folder = convert_single(model_id, filename, model_type, sample_size, scheduler_type, extract_ema, folder)
+            new_pr = api.upload_folder(folder_path=folder, path_in_repo="./", repo_id=model_id, repo_type="model", token=token, commit_description=COMMIT_MESSAGE.format(model_id), create_pr=True)
+            pr_number = new_pr.split("%2F")[-1].split("/")[0]
+            print(f"Pr created at: {'https://huggingface.co/' + os.path.join(model_id, 'discussions', pr_number)}")
+        finally:
+            shutil.rmtree(folder)
+
+    return new_pr
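Note that `previous_pr` is defined here but never called from `convert` in this commit. A hedged sketch of how it could be wired in to avoid opening duplicate PRs; the repo id, token, and filename are placeholders:

from huggingface_hub import HfApi

from convert import convert, previous_pr

api = HfApi()
pr_title = "Adding `diffusers` weights of this model"
model_id = "runwayml/stable-diffusion-v1-5"  # placeholder repo

# Hypothetical guard: reuse an already-open PR with the same title instead of opening a new one.
existing = previous_pr(api, model_id, pr_title)
if existing is not None:
    print(f"PR already open: https://huggingface.co/{model_id}/discussions/{existing.num}")
else:
    convert(token="hf_...", model_id=model_id, filename="v1-5-pruned.ckpt", model_type="v1")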
requirements.txt CHANGED
@@ -1,10 +1,7 @@
-
-
-
-
-
-
-
-OmegaConf
-ftfy
-safetensors
+huggingface_hub
+safetensors
+transformers
+accelerate
+git+https://github.com/huggingface/diffusers
+omegaconf
+pytorch_lightning
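The `diffusers` dependency installed from GitHub above is also what consumers use to load the converted weights. A minimal sketch of loading the fp16 safetensors variant that `convert_single` writes, assuming a reasonably recent diffusers release and a placeholder model id:

import torch
from diffusers import StableDiffusionPipeline

# Hypothetical consumer-side load of the fp16 / safetensors variant saved by convert_single(),
# from a repo where the opened PR has been merged.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # placeholder model id
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
)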