# gradio-on-cog / app.py
# author: radames — "gradio slide" (commit 5a8c0f6)
# (hosting-page chrome preserved: raw | history | blame — 5.72 kB)
import gradio as gr
from PIL import Image
from gradio_imageslider import ImageSlider
import requests
import base64
import numpy as np
import random
import io
# Local Cog (Replicate container) prediction endpoint that generate() posts to.
URL = "http://localhost:5000/predictions"
HEADERS = {
    "Content-Type": "application/json",
}
# Largest value usable as a generation seed (fits in a signed int32).
MAX_SEED = np.iinfo(np.int32).max
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    """Return a fresh random seed when *randomize_seed* is set, else *seed* unchanged."""
    return random.randint(0, MAX_SEED) if randomize_seed else seed
def generate(
    input_image: str,
    prompt: str,
    negative_prompt: str = "",
    seed: int = 0,
    width: int = 1024,
    height: int = 1024,
    prior_num_inference_steps: int = 30,
    # prior_timesteps: List[float] = None,
    prior_guidance_scale: float = 4.0,
    decoder_num_inference_steps: int = 12,
    # decoder_timesteps: List[float] = None,
    decoder_guidance_scale: float = 0.0,
    num_images_per_prompt: int = 2,
) -> list:
    """Post *input_image* to the local Cog prediction server and return a
    ``[original, generated]`` pair of PIL images for the ImageSlider.

    Args:
        input_image: Local file path of the upload (``gr.Image(type="filepath")``).
        prompt: Text prompt forwarded to the model.
        negative_prompt: Optional negative prompt; when empty, a curated
            default list of artifacts is used instead.

    Note: seed/width/height and the prior/decoder knobs are accepted so the
    UI wiring stays intact, but they are not forwarded in the current payload.

    Raises:
        gr.Error: when the server response contains no ``output`` key.
    """
    payload = {
        "input": {
            "hdr": 0,
            # The Cog container fetches the upload back through this Gradio
            # server's file route, so we pass a URL, not raw bytes.
            "image": "http://localhost:7860/file=" + input_image,
            "steps": 20,
            "prompt": prompt,
            "scheduler": "DDIM",
            "creativity": 0.25,
            "guess_mode": False,
            "resolution": "original",
            "resemblance": 0.75,
            "guidance_scale": 7,
            # Use the caller's negative prompt when provided; previously this
            # argument was silently ignored in favor of the hard-coded default.
            "negative_prompt": negative_prompt or "teeth, tooth, open mouth, longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, mutant",
        }
    }
    # Bounded timeout so a hung prediction server cannot block the UI forever.
    response = requests.post(URL, headers=HEADERS, json=payload, timeout=300)
    json_response = response.json()
    if "output" in json_response:
        # Output is a list of data-URI PNGs; strip the prefix before decoding.
        base64_image = json_response["output"][0]
        image_data = base64.b64decode(
            base64_image.replace("data:image/png;base64,", ""))
        image_stream = io.BytesIO(image_data)
        return [Image.open(input_image), Image.open(image_stream)]
    # .get() keeps a missing "status" key from raising a KeyError that would
    # mask the real server-side failure.
    raise gr.Error(json_response.get("status", "prediction failed"))
# Example rows for gr.Examples: (prompt, local image path), cached at startup.
examples = [
    ["An astronaut riding a green horse", "examples/image2.png"],
    ["A mecha robot in a favela by Tarsila do Amaral", "examples/image2.png"],
    ["The sprirt of a Tamagotchi wandering in the city of Los Angeles",
     "examples/image1.png"],
    ["A delicious feijoada ramen dish", "examples/image0.png"],
]
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            # type="filepath": generate() receives the upload as a local path.
            input_image = gr.Image(type="filepath")
            with gr.Group():
                with gr.Row():
                    prompt = gr.Text(
                        label="Prompt",
                        show_label=False,
                        max_lines=1,
                        placeholder="Enter your prompt",
                        container=False,
                    )
                    run_button = gr.Button("Run", scale=0)
        with gr.Column():
            # Before/after slider comparing the input with the generated result.
            result = ImageSlider(label="Result", type="pil")
    with gr.Accordion("Advanced options", open=False):
        negative_prompt = gr.Text(
            label="Negative prompt",
            max_lines=1,
            placeholder="Enter a Negative Prompt",
        )
        seed = gr.Slider(
            label="Seed",
            minimum=0,
            maximum=MAX_SEED,
            step=1,
            value=0,
        )
        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
        with gr.Row():
            # Width/height are pinned to 1024 (minimum == maximum), so these
            # sliders are effectively fixed; presumably kept for future
            # flexibility — confirm with the model's supported resolutions.
            width = gr.Slider(
                label="Width",
                minimum=1024,
                maximum=1024,
                step=512,
                value=1024,
            )
            height = gr.Slider(
                label="Height",
                minimum=1024,
                maximum=1024,
                step=512,
                value=1024,
            )
            num_images_per_prompt = gr.Slider(
                label="Number of Images",
                minimum=1,
                maximum=2,
                step=1,
                value=1,
            )
        with gr.Row():
            prior_guidance_scale = gr.Slider(
                label="Prior Guidance Scale",
                minimum=0,
                maximum=20,
                step=0.1,
                value=4.0,
            )
            prior_num_inference_steps = gr.Slider(
                label="Prior Inference Steps",
                minimum=10,
                maximum=30,
                step=1,
                value=20,
            )
            decoder_guidance_scale = gr.Slider(
                label="Decoder Guidance Scale",
                minimum=0,
                maximum=0,
                step=0.1,
                value=0.0,
            )
            decoder_num_inference_steps = gr.Slider(
                label="Decoder Inference Steps",
                minimum=4,
                maximum=12,
                step=1,
                value=10,
            )
    # Clickable examples; outputs are precomputed and cached at startup.
    gr.Examples(
        examples=examples,
        inputs=[prompt, input_image],
        outputs=result,
        fn=generate,
        cache_examples=True,
    )
    # Argument order must match generate()'s positional signature.
    inputs = [
        input_image,
        prompt,
        negative_prompt,
        seed,
        width,
        height,
        prior_num_inference_steps,
        # prior_timesteps,
        prior_guidance_scale,
        decoder_num_inference_steps,
        # decoder_timesteps,
        decoder_guidance_scale,
        num_images_per_prompt,
    ]
    # On either textbox submit or the Run button: refresh the seed first
    # (unqueued, hidden from the API), then run generation, exposed as "run".
    gr.on(
        triggers=[prompt.submit, negative_prompt.submit, run_button.click],
        fn=randomize_seed_fn,
        inputs=[seed, randomize_seed],
        outputs=seed,
        queue=False,
        api_name=False,
    ).then(
        fn=generate,
        inputs=inputs,
        outputs=result,
        api_name="run",
    )
if __name__ == "__main__":
    # Queue up to 20 pending requests before rejecting new ones, then serve.
    demo.queue(max_size=20).launch()