"""Gradio demo for FreeScale: tuning-free high-resolution generation with SDXL.

Two pipelines are loaded at startup: the full SDXL base model and SDXL-Turbo.
The UI dispatches to one of them depending on the "Disable Turbo" option.
"""

import gradio as gr
import spaces
import torch

from free_lunch_utils import register_free_upblock2d, register_free_crossattn_upblock2d
from pipeline_freescale import StableDiffusionXLPipeline
from pipeline_freescale_turbo import StableDiffusionXLPipeline_Turbo

dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

model_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
model_ckpt_turbo = "stabilityai/sdxl-turbo"

pipe = StableDiffusionXLPipeline.from_pretrained(model_ckpt, torch_dtype=dtype).to(device)
pipe_turbo = StableDiffusionXLPipeline_Turbo.from_pretrained(model_ckpt_turbo, torch_dtype=dtype).to(device)

# BUGFIX: was hard-coded to device='cuda', which raises on CPU-only hosts even
# though `device` above already falls back to "cpu".
generator = torch.Generator(device=device)
torch.cuda.empty_cache()  # no-op when CUDA is unavailable/uninitialized


def _run_pipeline(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale,
                  resolutions_list, fast_mode, cosine_scale, disable_freeu, restart_steps):
    """Seed a CUDA generator, optionally enable FreeU, run one FreeScale pass.

    Shared body of the two ``@spaces.GPU`` entry points below (they differ only
    in their GPU-time budget). Runs on the GPU worker, hence the hard-coded
    ``device='cuda'`` generator. Returns the first generated image.
    """
    generator = torch.Generator(device='cuda')
    generator = generator.manual_seed(seed)

    if not disable_freeu:
        # FreeU re-weights the UNet upblock backbone/skip features;
        # these coefficients are the ones this demo ships for SDXL.
        register_free_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
        register_free_crossattn_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)

    result = pipe(prompt, negative_prompt=negative_prompt, generator=generator,
                  num_inference_steps=ddim_steps, guidance_scale=guidance_scale,
                  resolutions_list=resolutions_list, fast_mode=fast_mode,
                  cosine_scale=cosine_scale, restart_steps=restart_steps,
                  ).images[0]
    return result


@spaces.GPU(duration=120)
def infer_gpu_part(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale,
                   resolutions_list, fast_mode, cosine_scale, disable_freeu, restart_steps):
    """Full-SDXL generation pass (larger ZeroGPU time budget)."""
    return _run_pipeline(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale,
                         resolutions_list, fast_mode, cosine_scale, disable_freeu, restart_steps)


@spaces.GPU(duration=40)
def infer_gpu_part_turbo(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale,
                         resolutions_list, fast_mode, cosine_scale, disable_freeu, restart_steps):
    """SDXL-Turbo generation pass (few steps, small ZeroGPU time budget)."""
    return _run_pipeline(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale,
                         resolutions_list, fast_mode, cosine_scale, disable_freeu, restart_steps)


# Target size -> coarse-to-fine resolution schedule ([H, W] per stage).
# The non-turbo path uses a 2-stage schedule, the turbo path a 3-stage one.
_RESOLUTIONS_NO_TURBO = {
    "2048 x 2048": [[1024, 1024], [2048, 2048]],
    "1024 x 2048": [[512, 1024], [1024, 2048]],
    "2048 x 1024": [[1024, 512], [2048, 1024]],
}
_RESOLUTIONS_TURBO = {
    "2048 x 2048": [[512, 512], [1024, 1024], [2048, 2048]],
    "1024 x 2048": [[256, 512], [512, 1024], [1024, 2048]],
    "2048 x 1024": [[512, 256], [1024, 512], [2048, 1024]],
}


def infer(prompt, output_size, ddim_steps, guidance_scale, cosine_scale, seed, options, negative_prompt):
    """Gradio click handler: dispatch to the turbo or full pipeline.

    Raises ValueError for an unknown ``output_size`` (previously an unexpected
    value fell through with ``resolutions_list`` unbound -> NameError).
    """
    print(prompt)
    print(negative_prompt)
    disable_turbo = 'Disable Turbo' in options
    disable_freeu = 'Disable FreeU' in options

    if disable_turbo:
        fast_mode = True
        try:
            resolutions_list = _RESOLUTIONS_NO_TURBO[output_size]
        except KeyError:
            raise ValueError(f"Unsupported output size: {output_size!r}")
        restart_steps = [int(ddim_steps * 0.3)]
        result = infer_gpu_part(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale,
                                resolutions_list, fast_mode, cosine_scale, disable_freeu, restart_steps)
    else:
        fast_mode = False
        try:
            resolutions_list = _RESOLUTIONS_TURBO[output_size]
        except KeyError:
            raise ValueError(f"Unsupported output size: {output_size!r}")
        restart_steps = [int(ddim_steps * 0.5)] * 2
        result = infer_gpu_part_turbo(pipe_turbo, seed, prompt, negative_prompt, ddim_steps, guidance_scale,
                                      resolutions_list, fast_mode, cosine_scale, disable_freeu, restart_steps)

    return result


examples = [
    ["A Enchanted illustration of a Palatial Ghost Explosion with a Mystical Sky, in the style of Eric, viewed from CamProX, Bokeh. High resolution, 8k, insanely detailed.",],
    ["Brunette pilot girl in a snowstorm, full body, moody lighting, intricate details, depth of field, outdoors, Fujifilm XT3, RAW, 8K UHD, film grain, Unreal Engine 5, ray tracing.",],
    ["A cute and adorable fluffy puppy wearing a witch hat in a Halloween autumn evening forest, falling autumn leaves, brown acorns on the ground, Halloween pumpkins spiderwebs, bats, and a witch’s broom.",],
    ["A Fantasy Realism illustration of a Heroic Phoenix Rising Adventurous with a Fantasy Waterfall, in the style of Illusia, viewed from Capture360XPro, Historical light. High resolution, 8k, insanely detailed.",],
]

css = """
#col-container {max-width: 768px; margin-left: auto; margin-right: auto;}
a {text-decoration-line: underline; font-weight: 600;}
.animate-spin {
  animation: spin 1s linear infinite;
}
@keyframes spin {
  from { transform: rotate(0deg); }
  to { transform: rotate(360deg); }
}
#share-btn-container {
  display: flex;
  padding-left: 0.5rem !important;
  padding-right: 0.5rem !important;
  background-color: #000000;
  justify-content: center;
  align-items: center;
  border-radius: 9999px !important;
  max-width: 15rem;
  height: 36px;
}
div#share-btn-container > div {
  flex-direction: row;
  background: black;
  align-items: center;
}
#share-btn-container:hover {
  background-color: #060606;
}
#share-btn {
  all: initial;
  color: #ffffff;
  font-weight: 600;
  cursor: pointer;
  font-family: 'IBM Plex Sans', sans-serif;
  margin-left: 0.5rem !important;
  padding-top: 0.5rem !important;
  padding-bottom: 0.5rem !important;
  right: 0;
}
#share-btn * {
  all: unset;
}
#share-btn-container div:nth-child(-n+2) {
  width: auto !important;
  min-height: 0px !important;
}
#share-btn-container .wrap {
  display: none !important;
}
#share-btn-container.hidden {
  display: none !important;
}
img[src*='#center'] {
  display: inline-block;
  margin: unset;
}
.footer {
  margin-bottom: 45px;
  margin-top: 10px;
  text-align: center;
  border-bottom: 1px solid #e5e5e5;
}
.footer > p {
  font-size: .8rem;
  display: inline-block;
  padding: 0 10px;
  transform: translateY(10px);
  background: white;
}
.dark .footer {
  border-color: #303030;
}
.dark .footer > p {
  background: #0b0f19;
}
"""


def mode_update(options):
    """Re-range the step/guidance sliders when Turbo mode is toggled.

    Non-turbo SDXL needs many DDIM steps and CFG; turbo needs few steps and
    guidance near zero.
    """
    if 'Disable Turbo' in options:
        return [gr.Slider(minimum=5, maximum=60, value=50),
                gr.Slider(minimum=1.0, maximum=20.0, value=7.5)]
    return [gr.Slider(minimum=2, maximum=6, value=4),
            gr.Slider(minimum=0.0, maximum=1.0, value=0.0)]


with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(
            """

FreeScale (unleash the resolution of SDXL)

FreeScale: Unleashing the Resolution of Diffusion Models via Tuning-Free Scale Fusion

[arXiv]      [Project Page]      [Code]

"""
        )
        prompt_in = gr.Textbox(label="Prompt", placeholder="A panda walking and munching bamboo in a bamboo forest.")
        with gr.Row():
            with gr.Accordion('Advanced Settings', open=False):
                with gr.Row():
                    output_size = gr.Dropdown(["2048 x 2048", "1024 x 2048", "2048 x 1024"],
                                              value="2048 x 2048", label="Output Size (H x W)",
                                              info="Due to GPU constraints, run the demo locally for higher resolutions.",
                                              scale=3)
                    options = gr.CheckboxGroup(['Disable Turbo', 'Disable FreeU'], label="Options",
                                               info="NOT recommended to change", scale=2)
                with gr.Row():
                    ddim_steps = gr.Slider(label='DDIM Steps', minimum=2, maximum=6, step=1, value=4)
                    guidance_scale = gr.Slider(label='Guidance Scale (Disabled in Turbo)',
                                               minimum=0.0, maximum=1.0, step=0.1, value=0.0)
                with gr.Row():
                    cosine_scale = gr.Slider(label='Cosine Scale', minimum=0, maximum=10, step=0.1, value=2.0)
                    seed = gr.Slider(label='Random Seed', minimum=0, maximum=10000, step=1, value=123)
                with gr.Row():
                    negative_prompt = gr.Textbox(label='Negative Prompt',
                                                 value='blurry, ugly, duplicate, poorly drawn, deformed, mosaic')
                # Swapping Turbo on/off changes the sensible ranges of these two sliders.
                options.change(mode_update, options, [ddim_steps, guidance_scale])

        submit_btn = gr.Button("Generate", variant='primary')
        image_result = gr.Image(label="Image Output")

        gr.Examples(examples=examples,
                    inputs=[prompt_in, output_size, ddim_steps, guidance_scale,
                            cosine_scale, seed, options, negative_prompt])

    submit_btn.click(fn=infer,
                     inputs=[prompt_in, output_size, ddim_steps, guidance_scale,
                             cosine_scale, seed, options, negative_prompt],
                     outputs=[image_result],
                     api_name="freescalehf")

if __name__ == "__main__":
    demo.queue(max_size=8).launch()