Spaces: Running on Zero
import torch
from diffusers import AutoencoderKL, StableDiffusionUpscalePipeline
from PIL import Image

# Inference device and the fixed RNG seed consumed by upscale().
device = "cuda"
seed = 100
def execute(input_image, prompt="beautiful girl", resize=None):
    """Upscale an image 4x with the Stable Diffusion x4 upscaler.

    Parameters
    ----------
    input_image : PIL.Image.Image
        Low-resolution source image.
    prompt : str, optional
        Text prompt guiding the upscaler. Default preserves the
        previously hard-coded prompt, so existing callers are unaffected.
    resize : tuple[int, int] or None, optional
        Optional (width, height) to resize the input to before upscaling.
        ``None`` keeps the original size — this matches the old behavior,
        where the resize branch was dead code (both dimensions were
        hard-coded to 0, so the ``resize`` call could never run).

    Returns
    -------
    PIL.Image.Image
        The upscaled image.
    """
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    # NOTE(review): the pipeline is rebuilt on every call, which reloads the
    # model weights each time — consider hoisting to module level.
    pipe = StableDiffusionUpscalePipeline.from_pretrained(
        model_id, torch_dtype=torch.float16, variant="fp16"
    )  # not support vae?
    pipe = pipe.to(device)
    # Memory-saving options for limited-VRAM GPUs.
    pipe.enable_attention_slicing()
    pipe.enable_xformers_memory_efficient_attention()
    pipe.vae.enable_tiling()

    low_res_img = input_image
    if resize is not None:
        low_res_img = input_image.resize(resize)
    return upscale(pipe, prompt, low_res_img)
def upscale(pipe, prompt, img, step=50, guidance_scale=7.5):
    """Run ``pipe`` on ``img`` with a fixed-seed generator for
    reproducible output, and return the first generated image."""
    rng = torch.Generator(device).manual_seed(seed)
    result = pipe(
        prompt=prompt,
        generator=rng,
        image=img,
        num_inference_steps=step,
        guidance_scale=guidance_scale,
    )
    return result.images[0]
if __name__ == "__main__":
    # Load the sample image, upscale it, and write the result to disk.
    source = Image.open("sample.jpg")
    execute(source).save("output.jpg")