#!/usr/bin/env python
import json
import os
import random
from typing import Tuple
import uuid
from diffusers import DiffusionPipeline
import gradio as gr
import numpy as np
from PIL import Image
import spaces
import torch
from gradio_imagefeed import ImageFeed
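# Default style selection and the negative prompt shared by the photographic styles.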
DEFAULT_STYLE = "Photograph"
DEFAULT_NEGATIVE = (
"(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon,"
" drawing, anime, asian, bad anatomy:1.4), text, cropped, out of frame,"
" worst quality, low quality, morbid, mutilated,"
" extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation,"
" deformed, blurry, bad anatomy, bad proportions, extra limbs,"
" plastic, fake, missing arms, missing legs, fat, ugly, huge breasts,"
" extra arms, extra legs, fused fingers, too many fingers"
)
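# Each style maps to a (positive prompt template, negative prompt) pair; the
# "{positive}" placeholder is replaced with the user's prompt by apply_style().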
STYLES = {
"Photograph": (
(
"realistic photograph of {positive}, ultra fine detail, lifelike,"
" high-resolution, sharp, realistic colors, photorealistic, Nikon, 35mm"
),
DEFAULT_NEGATIVE,
),
"Cinematic": (
(
"cinematic photograph of {positive}, 35mm photograph, film, bokeh,"
" professional, 4k, highly detailed"
),
DEFAULT_NEGATIVE,
),
"Still Photo": (
(
"cinematic still photograph of {positive}, emotional, harmonious, vignette,"
" highly detailed, bokeh, cinemascope, moody, epic, gorgeous, film grain,"
" grainy, high resolution"
),
DEFAULT_NEGATIVE,
),
    "No Style": ("{positive}", ""),
}
def apply_style(name: str, pos: str, neg: str) -> Tuple[str, str]:
    """Merge the prompt into the chosen style template and combine negative prompts."""
    def_pos, def_neg = STYLES.get(name, ("{positive}", ""))
    pos = def_pos.replace("{positive}", pos).strip().strip(",")
    parts = (def_neg.strip().strip(","), neg.strip().strip(","))
    neg = ", ".join(part for part in parts if part)
    return pos, neg
DESCRIPTION = ""
MAX_SEED = np.iinfo(np.int32).max
CACHE_EXAMPLES = False  # set to "lazy" to cache example outputs on first request
MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "2048"))
USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
NUM_IMAGES_PER_PROMPT = 3
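# Load both RealVisXL checkpoints at startup; generate() runs every request through
# both pipelines and concatenates their outputs.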
if torch.cuda.is_available():
pipe = DiffusionPipeline.from_pretrained(
"SG161222/RealVisXL_V4.0",
torch_dtype=torch.float16,
use_safetensors=True,
add_watermarker=False,
variant="fp16",
)
pipe2 = DiffusionPipeline.from_pretrained(
"SG161222/RealVisXL_V3.0_Turbo",
torch_dtype=torch.float16,
use_safetensors=True,
add_watermarker=False,
variant="fp16",
)
if ENABLE_CPU_OFFLOAD:
pipe.enable_model_cpu_offload()
pipe2.enable_model_cpu_offload()
else:
pipe.to(device)
pipe2.to(device)
print("Loaded on Device!")
if USE_TORCH_COMPILE:
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
pipe2.unet = torch.compile(pipe2.unet, mode="reduce-overhead", fullgraph=True)
print("Model Compiled!")
def save_image(img):
unique_name = str(uuid.uuid4()) + ".png"
img.save(unique_name)
return unique_name
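# Return a fresh random seed when requested; otherwise keep the user-supplied seed.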
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
if randomize_seed:
seed = random.randint(0, MAX_SEED)
return seed
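# Main inference entry point; the @spaces.GPU decorator requests GPU time when running on ZeroGPU.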
@spaces.GPU(enable_queue=True)
def generate(
prompt: str,
negative_prompt: str = "",
use_negative_prompt: bool = False,
style: str = DEFAULT_STYLE,
seed: int = 0,
width: int = 896,
height: int = 1152,
guidance_scale: float = 3,
randomize_seed: bool = False,
use_resolution_binning: bool = True,
progress=gr.Progress(track_tqdm=True),
):
prompt, negative_prompt = apply_style(style, prompt, negative_prompt)
seed = int(randomize_seed_fn(seed, randomize_seed))
generator = torch.Generator().manual_seed(seed)
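    # Shared keyword arguments for both pipelines. The step count is fixed here and is
    # not taken from the "Steps" slider in the UI.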
options = {
"prompt": prompt,
"negative_prompt": negative_prompt,
"width": width,
"height": height,
"guidance_scale": guidance_scale,
"num_inference_steps": 25,
"generator": generator,
"num_images_per_prompt": NUM_IMAGES_PER_PROMPT,
"use_resolution_binning": use_resolution_binning,
"output_type": "pil",
}
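    # Run the same settings through RealVisXL V4.0 and V3.0 Turbo and merge the results.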
images = pipe(**options).images + pipe2(**options).images
image_paths = [save_image(img) for img in images]
return image_paths, seed
examples = [
(
"college life of 21 year old, depth of field, bokeh, shallow"
" focus, minimalism, fujifilm xh2s with Canon EF lens, cinematic --ar 85:128"
" --v 6.0 --style raw"
),
]
css = ""
with gr.Blocks(css=css, theme="rawrsor1/Everforest") as demo:
with gr.Group():
with gr.Row():
prompt = gr.Text(
label="Prompt",
show_label=False,
max_lines=4,
placeholder="Enter a Prompt",
container=False,
)
run_button = gr.Button("Run")
#result = ImageFeed(label="Result")
result = gr.Gallery(label="Result", preview=True)
with gr.Accordion("Advanced", open=False):
use_negative_prompt = gr.Checkbox(
label="Use Negative", value=True, visible=True
)
negative_prompt = gr.Text(
label="Negative Prompt",
max_lines=4,
placeholder="",
value="",
visible=True,
)
with gr.Row():
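            # Note: this slider is not wired into generate(), which uses a fixed 25 steps.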
num_inference_steps = gr.Slider(
label="Steps",
minimum=10,
maximum=60,
step=1,
value=30,
interactive=True
)
with gr.Row():
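            # Note: this slider is not wired into generate(), which always produces
            # NUM_IMAGES_PER_PROMPT images per pipeline.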
num_images_per_prompt = gr.Slider(
label="Image Count",
minimum=1,
maximum=5,
step=1,
value=2,
interactive=True
)
seed = gr.Slider(
label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, visible=True
)
randomize_seed = gr.Checkbox(label="New Seed", value=True)
with gr.Row(visible=True):
width = gr.Slider(
label="Width",
minimum=512,
maximum=2048,
step=16,
value=896,
interactive=True
)
height = gr.Slider(
label="Height",
minimum=512,
maximum=2048,
step=16,
value=1152,
interactive=True
)
with gr.Row():
guidance_scale = gr.Slider(
label="Guidance",
minimum=0.1,
maximum=20.0,
step=0.1,
value=6,
interactive=True
)
with gr.Row(visible=True):
style_selection = gr.Radio(
show_label=True,
container=True,
interactive=True,
choices=list(STYLES.keys()),
value=DEFAULT_STYLE,
label="Style",
)
gr.Examples(
examples=examples,
inputs=prompt,
outputs=[result, seed],
fn=generate,
cache_examples=CACHE_EXAMPLES,
)
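    # Show or hide the negative prompt box when the checkbox is toggled.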
use_negative_prompt.change(
fn=lambda x: gr.update(visible=x),
inputs=use_negative_prompt,
outputs=negative_prompt,
api_name=False,
)
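    # Submitting either text box or clicking "Run" triggers generation; the seed that
    # was actually used is written back to the seed slider.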
gr.on(
triggers=[
prompt.submit,
negative_prompt.submit,
run_button.click,
],
fn=generate,
inputs=[
prompt,
negative_prompt,
use_negative_prompt,
style_selection,
seed,
width,
height,
guidance_scale,
randomize_seed,
],
outputs=[result, seed],
api_name="run",
)
if __name__ == "__main__":
demo.queue(max_size=20).launch()