import gradio as gr
from diffusers import StableDiffusionXLPipeline, EDMEulerScheduler
from custom_pipeline import CosStableDiffusionXLInstructPix2PixPipeline
from huggingface_hub import hf_hub_download
import numpy as np
import math
import spaces
import torch
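# Download the CosXL checkpoints from the Hub: the instruction-edit model and the base text-to-image model.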
edit_file = hf_hub_download(repo_id="stabilityai/cosxl", filename="cosxl_edit.safetensors")
normal_file = hf_hub_download(repo_id="stabilityai/cosxl", filename="cosxl.safetensors")
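# Patch EDMEulerScheduler.set_timesteps to build sigmas on a log-linear ramp between
# sigma_min and sigma_max (descending), then derive the timesteps via noise preconditioning.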
def set_timesteps_patched(self, num_inference_steps: int, device = None):
    self.num_inference_steps = num_inference_steps
    ramp = np.linspace(0, 1, self.num_inference_steps)
    sigmas = torch.linspace(math.log(self.config.sigma_min), math.log(self.config.sigma_max), len(ramp)).exp().flip(0)
    sigmas = sigmas.to(dtype=torch.float32, device=device)
    self.timesteps = self.precondition_noise(sigmas)
    self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)])
    self._step_index = None
    self._begin_index = None
    self.sigmas = self.sigmas.to("cpu")  # to avoid too much CPU/GPU communication
EDMEulerScheduler.set_timesteps = set_timesteps_patched
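# Edit pipeline: the InstructPix2Pix-style UNet takes 8 input channels (noisy latents plus the
# encoded conditioning image). Loaded in fp32 here; passing torch_dtype=torch.float16 would
# roughly halve memory use and match pipe_normal below.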
pipe_edit = CosStableDiffusionXLInstructPix2PixPipeline.from_single_file(
    edit_file, num_in_channels=8
)
pipe_edit.scheduler = EDMEulerScheduler(sigma_min=0.002, sigma_max=120.0, sigma_data=1.0, prediction_type="v_prediction")
pipe_edit.to("cuda")
pipe_normal = StableDiffusionXLPipeline.from_single_file(normal_file, torch_dtype=torch.float16)
pipe_normal.scheduler = EDMEulerScheduler(sigma_min=0.002, sigma_max=120.0, sigma_data=1.0, prediction_type="v_prediction")
pipe_normal.to("cuda")
@spaces.GPU
def run_normal(prompt, negative_prompt, guidance_scale, progress=gr.Progress(track_tqdm=True)):
    return pipe_normal(prompt, negative_prompt=negative_prompt, guidance_scale=guidance_scale, num_inference_steps=20).images[0]
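# Editing: resize the input image to 1024x1024 and pass the edit instruction as the prompt.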
@spaces.GPU
def run_edit(image, prompt, negative_prompt, guidance_scale, progress=gr.Progress(track_tqdm=True)):
    resolution = 1024
    image = image.resize((resolution, resolution))  # PIL resize returns a new image; assign it
    return pipe_edit(prompt=prompt, image=image, height=resolution, width=resolution, negative_prompt=negative_prompt, guidance_scale=guidance_scale, num_inference_steps=20).images[0]
css = '''
.gradio-container{
    max-width: 768px !important;
    margin: 0 auto;
}
'''
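# Two-tab UI: "CosXL" for text-to-image generation, "CosXL Edit" for instruction-based editing.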
with gr.Blocks(css=css) as demo:
    gr.Markdown('''# CosXL demo
Unofficial demo for CosXL, an SDXL model tuned to produce images with the full color range. CosXL Edit lets you perform instruction-based edits on images. Both are released under a [non-commercial community license](https://huggingface.co./stabilityai/cosxl/blob/main/LICENSE).
    ''')
with gr.Tab("CosXL"):
with gr.Group():
with gr.Row():
prompt_normal = gr.Textbox(show_label=False, scale=4, placeholder="Your prompt, e.g.: backlit photography of a dog")
button_normal = gr.Button("Generate", min_width=120)
output_normal = gr.Image(label="Your result image", interactive=False)
with gr.Accordion("Advanced Settings", open=False):
negative_prompt_normal = gr.Textbox(label="Negative Prompt")
guidance_scale_normal = gr.Number(label="Guidance Scale", value=7)
with gr.Tab("CosXL Edit"):
with gr.Group():
image_edit = gr.Image(label="Image you would like to edit", type="pil")
with gr.Row():
prompt_edit = gr.Textbox(show_label=False, scale=4, placeholder="Edit instructions, e.g.: Make the day cloudy")
button_edit = gr.Button("Generate", min_width=120)
output_edit = gr.Image(label="Your result image", interactive=False)
with gr.Accordion("Advanced Settings", open=False):
negative_prompt_edit = gr.Textbox(label="Negative Prompt")
guidance_scale_edit = gr.Number(label="Guidance Scale", value=7)
    gr.on(
        triggers=[
            button_normal.click,
            prompt_normal.submit
        ],
        fn=run_normal,
        inputs=[prompt_normal, negative_prompt_normal, guidance_scale_normal],
        outputs=[output_normal],
    )
    gr.on(
        triggers=[
            button_edit.click,
            prompt_edit.submit
        ],
        fn=run_edit,
        inputs=[image_edit, prompt_edit, negative_prompt_edit, guidance_scale_edit],
        outputs=[output_edit]
    )
if __name__ == "__main__":
    demo.launch(share=True)