Update app.py
app.py CHANGED
@@ -8,6 +8,7 @@ import torch
 from diffusers import StableDiffusionXLPipeline, AutoencoderKL
 from blora_utils import BLOCKS, filter_lora, scale_lora
 
+is_shared_ui = True if "fffiloni/B-LoRa-Inference" in os.environ['SPACE_ID'] else False
 hf_token = os.environ.get("YOUR_HF_TOKEN_WITH_READ_PERMISSION")
 login(token=hf_token)
 
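The new `is_shared_ui` flag keys off the `SPACE_ID` environment variable that Hugging Face sets inside a running Space. As written, `os.environ['SPACE_ID']` raises a `KeyError` when the script runs outside a Space; a minimal sketch of the same check (my own variant, not part of this commit) that also runs locally:

```python
import os

# SPACE_ID is set by Hugging Face inside a Space (e.g. "user/space-name").
# .get with a default keeps the check from raising KeyError locally, and the
# `in` test already yields a bool, so `True if ... else False` is unnecessary.
is_shared_ui = "fffiloni/B-LoRa-Inference" in os.environ.get("SPACE_ID", "")
```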
@@ -151,6 +152,26 @@ css="""
     margin: 0 auto;
     max-width: 520px;
 }
+div#warning-duplicate {
+    background-color: #ebf5ff;
+    padding: 0 16px 16px;
+    margin: 20px 0;
+}
+div#warning-duplicate > .gr-prose > h2, div#warning-duplicate > .gr-prose > p {
+    color: #0f4592!important;
+}
+div#warning-duplicate strong {
+    color: #0f4592;
+}
+p.actions {
+    display: flex;
+    align-items: center;
+    margin: 20px 0;
+}
+div#warning-duplicate .actions a {
+    display: inline-block;
+    margin-right: 10px;
+}
 """
 
 if torch.cuda.is_available():
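The CSS added above reaches the banner through its Gradio `elem_id`: Gradio uses `elem_id` as the DOM id of the component's wrapper, which is what the `div#warning-duplicate` selector in the `gr.Blocks(css=...)` string matches. A self-contained sketch of that mapping using only stock Gradio (banner text is placeholder):

```python
import gradio as gr

# Custom CSS handed to gr.Blocks; the selector matches the elem_id below.
css = """
div#warning-duplicate {
    background-color: #ebf5ff;
    padding: 0 16px 16px;
}
"""

with gr.Blocks(css=css) as demo:
    # elem_id gives the wrapping div the DOM id "warning-duplicate",
    # so the rule above applies to everything inside it.
    gr.HTML("<h2>placeholder banner</h2>", elem_id="warning-duplicate")

if __name__ == "__main__":
    demo.launch()
```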
@@ -161,6 +182,23 @@ else:
 with gr.Blocks(css=css) as demo:
 
     with gr.Column(elem_id="col-container"):
+        if is_shared_ui:
+            top_description = gr.HTML(f'''
+            <div class="gr-prose">
+                <h2><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;" fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
+                Note: you might want to use a private custom B-LoRa model</h2>
+                <p class="main-message">
+                    To do so, <strong>duplicate the Space</strong> and run it on your own profile using <strong>your own access token</strong> and, optionally, a GPU (T4-small or A10G-small) for faster inference without waiting in the queue.<br />
+                </p>
+                <p class="actions">
+                    <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}?duplicate=true">
+                        <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-lg-dark.svg" alt="Duplicate this Space" />
+                    </a>
+                    to start using private models and skip the queue
+                </p>
+            </div>
+            ''', elem_id="warning-duplicate")
+
         gr.Markdown(f"""
         # B-LoRas Inference
         Currently running on {power_device}.
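Taken together, the banner is only instantiated when the code detects the original shared Space, so duplicated copies never render it; the `?duplicate=true` query string is Hugging Face's standard deep link for duplicating a Space. A condensed sketch of the whole pattern (markup trimmed, not the literal commit code):

```python
import os
import gradio as gr

space_id = os.environ.get("SPACE_ID", "")
is_shared_ui = "fffiloni/B-LoRa-Inference" in space_id

with gr.Blocks() as demo:
    with gr.Column(elem_id="col-container"):
        if is_shared_ui:
            # Rendered only in the public Space; user duplicates skip this.
            gr.HTML(
                f'<p class="actions">'
                f'<a href="https://huggingface.co/spaces/{space_id}?duplicate=true">'
                'Duplicate this Space</a> to use private models and skip the queue</p>',
                elem_id="warning-duplicate",
            )
        gr.Markdown("# B-LoRas Inference")

if __name__ == "__main__":
    demo.launch()
```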
|