import spaces
import argparse
import os
import random
import time
from os import path

from safetensors.torch import load_file
from huggingface_hub import hf_hub_download
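
# Keep all Hugging Face downloads in a local "models" folder next to this file.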
cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
os.environ["TRANSFORMERS_CACHE"] = cache_path
os.environ["HF_HUB_CACHE"] = cache_path
os.environ["HF_HOME"] = cache_path
import gradio as gr
import torch
from diffusers import StableDiffusionXLPipeline, LCMScheduler
# from scheduling_tcd import TCDScheduler
torch.backends.cuda.matmul.allow_tf32 = True
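
# Minimal context manager that logs how long the wrapped block takes.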
class timer:
    def __init__(self, method_name="timed process"):
        self.method = method_name

    def __enter__(self):
        self.start = time.time()
        print(f"{self.method} starts")

    def __exit__(self, exc_type, exc_val, exc_tb):
        end = time.time()
        print(f"{self.method} took {str(round(end - self.start, 2))}s")

if not path.exists(cache_path):
    os.makedirs(cache_path, exist_ok=True)
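
# Load the SDXL base pipeline, then swap in ByteDance's Hyper-SD one-step
# distilled UNet so a single denoising step yields a usable image.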
pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.bfloat16)
pipe.to(device="cuda", dtype=torch.bfloat16)
unet_state = load_file(hf_hub_download("ByteDance/Hyper-SD", "Hyper-SDXL-1step-Unet.safetensors"), device="cuda")
pipe.unet.load_state_dict(unet_state)
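# The Hyper-SD examples pair few-step sampling with "trailing" timestep
# spacing, which places the sampled timesteps at the end of the schedule.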
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")

DESCRIPTION = """# Instant Image
### Super fast text-to-image generator.
### <span style='color: red;'>You may increase the steps from 4 to 8 if you are not satisfied with the results.</span>
### The first image takes longer to process; later images generate faster.
"""
if not torch.cuda.is_available():
    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"

CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "1") == "1"
NUM_IMAGES_PER_PROMPT = 1  # assumed: the pipeline call below produces one image per prompt

examples = [
    "A Monkey with a happy face in the Sahara desert.",
    "Eiffel Tower made of ice.",
    "Color photo of a corgi made of transparent glass, standing on the riverside in Yosemite National Park.",
    "A close-up photo of a woman. She wore a blue coat with a gray dress underneath and has blue eyes.",
    "A litter of golden retriever puppies playing in the snow. Their heads pop out of the snow, covered in.",
    "an astronaut sitting in a diner, eating fries, cinematic, analog film",
]


def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    if randomize_seed:
        seed = random.randint(0, 99999999)
    return seed


@spaces.GPU(duration=10)
def process_image(height, width, prompt, seed, randomize_seed):
    global pipe
    seed = int(randomize_seed_fn(seed, randomize_seed))
    # Pass the seeded generator to the pipeline so the seed actually affects the output.
    generator = torch.Generator().manual_seed(seed)
    with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16), timer("inference"):
        return pipe(
            prompt=prompt,
            generator=generator,
            num_inference_steps=1,
            guidance_scale=0.0,
            height=int(height),
            width=int(width),
            timesteps=[800],
        ).images


with gr.Blocks() as demo:
    gr.Markdown(DESCRIPTION)
    with gr.Row(equal_height=False):
        with gr.Group():
            with gr.Row():
                prompt = gr.Text(
                    label="Prompt",
                    show_label=False,
                    max_lines=1,
                    placeholder="Enter your prompt",
                    container=False,
                )
                run_button = gr.Button("Run", scale=0)
            result = gr.Gallery(label="Result", columns=NUM_IMAGES_PER_PROMPT, show_label=False)
            with gr.Accordion("Advanced options", open=False):
                with gr.Group():
                    with gr.Row():
                        seed = gr.Slider(
                            label="Seed",
                            minimum=0,
                            maximum=99999999,
                            step=1,
                            value=0,
                        )
                        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                    with gr.Row(visible=True):
                        width = gr.Slider(
                            label="Width",
                            minimum=256,
                            maximum=8192,
                            step=32,
                            value=2048,
                        )
                        height = gr.Slider(
                            label="Height",
                            minimum=256,
                            maximum=8192,
                            step=32,
                            value=2048,
                        )
    gr.Examples(
        examples=examples,
        inputs=prompt,
        outputs=[result],
        # The original referenced an undefined `generate`; run the examples
        # through process_image with assumed default size/seed settings instead.
        fn=lambda p: process_image(1024, 1024, p, 0, True),
        cache_examples=CACHE_EXAMPLES,
    )

    reactive_controls = [height, width, prompt, seed, randomize_seed]
    run_button.click(process_image, inputs=reactive_controls, outputs=[result])

if __name__ == "__main__":
    demo.launch()