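"""Gradio demo that generates logo and marketing images with FLUX.1-schnell.

Korean prompts are detected and translated to English with a Helsinki-NLP
translation model before being passed to the diffusion pipeline.
"""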
import gradio as gr
import numpy as np
import random
import spaces
import torch
from diffusers import DiffusionPipeline
from transformers import pipeline
# Initialize the Korean-to-English translation pipeline
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
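# Load FLUX.1-schnell in bfloat16, on GPU if one is available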
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype).to(device)
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048
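# On Hugging Face ZeroGPU Spaces, @spaces.GPU allocates a GPU for the duration of each call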
@spaces.GPU()
def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, progress=gr.Progress(track_tqdm=True)):
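    """Generate an image for `prompt` with FLUX.1-schnell.

    Korean prompts are translated to English first. Returns the generated
    image and the seed that was actually used (useful when it was randomized).
    """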
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)
    # Detect Korean characters in the prompt and translate it to English
    if any('\uAC00' <= char <= '\uD7A3' for char in prompt):
        print("Translating Korean prompt...")
        translated_prompt = translator(prompt, max_length=512)[0]['translation_text']
        print("Translated prompt:", translated_prompt)
        prompt = translated_prompt
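    # FLUX.1-schnell is a distilled model meant to run in a few steps without
    # classifier-free guidance, hence guidance_scale=0.0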
    image = pipe(
        prompt=prompt,
        width=width,
        height=height,
        num_inference_steps=num_inference_steps,
        generator=generator,
        guidance_scale=0.0
    ).images[0]
    return image, seed
examples = [
    "Create a new logo for a tech startup",
    "Design an engaging Instagram post for a fashion brand",
    "Create a new character for a social media campaign",
    "Generate a marketing advertisement for a new product launch",
    "Design a social media banner for a charity event",
    "Create a new branding concept for a luxury hotel",
    "Design a promotional video thumbnail for a movie premiere",
    "Generate a marketing campaign for a sustainable lifestyle brand"
]
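# Hide the default Gradio footer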
css = """
footer {
visibility: hidden;
}
"""
with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
    with gr.Column(elem_id="col-container"):
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
                elem_id="prompt"
            )
            run_button = gr.Button("Run", scale=0)
        result = gr.Image(label="Result", show_label=False, elem_id="result")
        with gr.Accordion("Advanced Settings", open=False, elem_id="advanced-settings"):
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,
                )
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,
                )
            with gr.Row():
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=4,
                )
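        # Example prompts; with cache_examples="lazy" each example's output is
        # generated and cached on first use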
        gr.Examples(
            examples=examples,
            fn=infer,
            inputs=[prompt],
            outputs=[result, seed],
            cache_examples="lazy"
        )
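    # Run inference when the button is clicked or Enter is pressed in the prompt box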
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[prompt, seed, randomize_seed, width, height, num_inference_steps],
        outputs=[result, seed]
    )
demo.launch()