import gradio as gr
#import torch
#from torch import autocast  # only for GPU
from PIL import Image
import numpy as np
from io import BytesIO
import os
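
# Read the Hugging Face access token from the environment (set as the HF_TOKEN_SD Space secret).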
MY_SECRET_TOKEN = os.environ.get('HF_TOKEN_SD')

from diffusers import StableDiffusionImg2ImgPipeline

print("hello sylvain")

YOUR_TOKEN = MY_SECRET_TOKEN
device = "cpu"

#prompt_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=YOUR_TOKEN)
#prompt_pipe.to(device)
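
# Load the Stable Diffusion v1-4 img2img pipeline and keep it on CPU.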
img_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=YOUR_TOKEN)
img_pipe.to(device)
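
# Gradio components: an image upload (passed to the callback as a file path) and a two-column output gallery.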
source_img = gr.Image(source="upload", type="filepath", label="init_img | 512*512 px")
gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[2], height="auto")
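
# Resize the uploaded init image to the requested width/height using LANCZOS resampling.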
def resize(w_val, l_val, img):
    #baseheight = value
    img = Image.open(img)
    #hpercent = (baseheight/float(img.size[1]))
    #wsize = int((float(img.size[0])*float(hpercent)))
    #img = img.resize((wsize,baseheight), Image.Resampling.LANCZOS)
    img = img.resize((w_val, l_val), Image.Resampling.LANCZOS)
    return img
#init_image = init_image.resize((768, 512))
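
# Run img2img on the prompt and the 512x512 init image; any output flagged by the
# safety checker is replaced with the local "unsafe.png" placeholder.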
def infer(prompt, source_img):
    source_image = resize(512, 512, source_img)
    source_image.save('source.png')
    images_list = img_pipe([prompt] * 2, init_image=source_image, strength=0.75)
    images = []
    safe_image = Image.open(r"unsafe.png")
    for i, image in enumerate(images_list["sample"]):
        if images_list["nsfw_content_detected"][i]:
            images.append(safe_image)
        else:
            images.append(image)
    return images
print("Great sylvain ! Everything is working fine !") | |
title="Img2Img Stable Diffusion CPU" | |
description="Img2Img Stable Diffusion example using CPU and HF token. <br />Warning: Slow process... ~5/10 min inference time. <b>NSFW filter enabled.</b>" | |
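
# Build the interface and launch it with a request queue, since CPU inference takes several minutes per request.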
gr.Interface(fn=infer, inputs=["text", source_img], outputs=gallery, title=title, description=description).queue(max_size=100).launch(enable_queue=True)
#from torch import autocast
#import requests
#import torch
#from PIL import Image
#from io import BytesIO
#import os

#MY_SECRET_TOKEN = os.environ.get('HF_TOKEN_SD')

#from diffusers import StableDiffusionImg2ImgPipeline

#YOUR_TOKEN = MY_SECRET_TOKEN

# load the pipeline
#device = "cuda"
#model_id_or_path = "CompVis/stable-diffusion-v1-4"
# pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token = YOUR_TOKEN)
#pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
#    model_id_or_path,
#    revision="fp16",
#    torch_dtype=torch.float16,
#    use_auth_token=YOUR_TOKEN
#)
# or download via git clone https://huggingface.co./CompVis/stable-diffusion-v1-4
# and pass `model_id_or_path="./stable-diffusion-v1-4"` without having to use `use_auth_token=True`.
#pipe = pipe.to(device)

# let's download an initial image
#url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
#response = requests.get(url)
#init_image = Image.open(BytesIO(response.content)).convert("RGB")
#init_image = init_image.resize((768, 512))

#prompt = "Lively, illustration of a [[[<king::4>]]], portrait, fantasy, intricate, Scenic, hyperdetailed, hyper realistic <king-hearthstone>, unreal engine, 4k, smooth, sharp focus, intricate, cinematic lighting, highly detailed, octane, digital painting, artstation, concept art, vibrant colors, Cinema4D, WLOP, 3d render, in the style of hearthstone::5 art by Artgerm and greg rutkowski and magali villeneuve, martina jackova, Giger"

#with autocast("cuda"):
#    images = pipe(prompt=prompt, init_image=init_image, strength=0.75, guidance_scale=7.5).images

#images[0].save("fantasy_landscape.png")