import gradio as gr
import os
from gradio_client import Client
title="Prompt Converter"
description="""
<h1>Prompt Converter</h1>
<p style="text-align:center;">
Stable Diffusion 2 uses the OpenCLIP ViT-H model trained on the LAION dataset, so it knows different things than the OpenAI ViT-L we're all used to prompting.
<br />This demo converts a v1.x Stable Diffusion prompt to a Stable Diffusion 2.x prompt
<br />by generating an image through <a href="https://huggingface.co./runwayml/stable-diffusion-v1-5" target="_blank">RunwayML Stable Diffusion 1.5</a>, then interrogating the resulting image with <a href="https://huggingface.co./spaces/fffiloni/CLIP-Interrogator-2" target="_blank">CLIP Interrogator 2</a> to give you a Stable Diffusion 2 equivalent prompt.
</p>
"""
stable_diffusion = Client("https://runwayml-stable-diffusion-v1-5.hf.space/")
clip_interrogator_2 = Client("https://fffiloni-clip-interrogator-2.hf.space/")
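
# Step 1: send the v1.x prompt to the SD 1.5 Space. fn_index=2 is its text-to-image endpoint,
# which returns a directory of generated images; we keep only the first one.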
def get_images(prompt):
    print("Calling SD")
    gallery_dir = stable_diffusion.predict(prompt, fn_index=2)
    print(f"Gallery Directory: {gallery_dir}")
    img_results = [os.path.join(gallery_dir, img) for img in os.listdir(gallery_dir)]
    return img_results[0]
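
# Step 2: interrogate the generated image with CLIP Interrogator 2. The /clipi2 endpoint takes
# the image, the interrogation mode ('best', 'classic' or 'fast'), and a max-flavors value used in 'best' mode.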
def get_new_prompt(img, mode):
    interrogate = clip_interrogator_2.predict(
        img,   # str (filepath or URL to image) in 'parameter_3' Image component
        mode,  # str in 'Select mode' Radio component
        12,    # int | float (numeric value between 2 and 24) in 'best mode max flavors' Slider component
        api_name="/clipi2"
    )
    return interrogate
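
# Full pipeline: v1.x prompt -> SD 1.5 image -> CLIP Interrogator 2 -> SD 2.x-friendly prompt.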
def infer(prompt, mode):
    img = get_images(prompt)
    result = get_new_prompt(img, mode)
    # the interrogator may return extra outputs; the converted prompt comes first
    new_prompt = result[0] if isinstance(result, (list, tuple)) else result
    return new_prompt, img
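
# UI: prompt and mode on the left; the intermediate image and the converted prompt on the right.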
with gr.Blocks() as demo:
    gr.HTML(description)
    with gr.Row():
        with gr.Column():
            prompt_input = gr.Textbox(lines=4, label="Input v1.x Stable Diffusion prompt")
            mode_input = gr.Radio(['best', 'classic', 'fast'], label='mode', value='fast')
            submit_btn = gr.Button("Submit")
        with gr.Column():
            sd_inter = gr.Image()
            prompt_output = gr.Textbox(lines=4, label="Converted v2.x Stable Diffusion prompt")
    submit_btn.click(
        fn=infer, inputs=[prompt_input, mode_input], outputs=[prompt_output, sd_inter]
    )
    examples = [
        ["girl with steampunk weapons and uniform, serious, finely detailed, made by wlop, boichi, ilya kuvshinov, full body portrait, illustration, grass, sunny, sky, anime, side view, perfect anime face, detailed face, zoomed out, smooth", "fast"],
        ["a yellow cockatiel riding on the rings of saturn wearing a propeller hat, fantasy, intricate, elegant, highly detailed, digital painting, artstation, concept art, smooth, sharp focus, illustration, art by artgerm and greg rutkowski and alphonse mucha ", "classic"],
        ["painting, view from inside edward hopper's painting nighthawks, of a group of werebears robbing a bank, foggy ", "best"]
    ]
    gr.Examples(
        examples=examples,
        fn=infer,
        inputs=[prompt_input, mode_input],
        outputs=[prompt_output, sd_inter]
    )
#demo=gr.Interface(fn=infer, inputs=[prompt_input,mode_input], outputs=[prompt_output],title=title,description=description,examples=examples)
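
# Queue incoming requests: each run makes two slow remote calls (SD 1.5, then CLIP Interrogator 2).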
demo.queue(max_size=10, concurrency_count=20)
demo.launch(enable_queue=True)