import random
import os
import uuid
from datetime import datetime

import gradio as gr
import numpy as np
import spaces
import torch
from diffusers import DiffusionPipeline
from PIL import Image
# ---------- Initial setup and model loading ----------
SAVE_DIR = "saved_images"  # directory Gradio uses to store generated images
if not os.path.exists(SAVE_DIR):
    os.makedirs(SAVE_DIR, exist_ok=True)

device = "cuda" if torch.cuda.is_available() else "cpu"
repo_id = "black-forest-labs/FLUX.1-dev"
adapter_id = "openfree/pepe"

pipeline = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.bfloat16)
pipeline.load_lora_weights(adapter_id)
pipeline = pipeline.to(device)

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024
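
# Note: FLUX.1-dev is a large model; loading it in bfloat16 as above assumes a
# CUDA GPU with enough VRAM. On a CPU-only machine the pipeline still loads,
# but generation is extremely slow.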

def save_generated_image(image, prompt):
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    unique_id = str(uuid.uuid4())[:8]
    filename = f"{timestamp}_{unique_id}.png"
    filepath = os.path.join(SAVE_DIR, filename)

    # Save the image
    image.save(filepath)

    # Append metadata for the generated image
    metadata_file = os.path.join(SAVE_DIR, "metadata.txt")
    with open(metadata_file, "a", encoding="utf-8") as f:
        f.write(f"{filename}|{prompt}|{timestamp}\n")

    return filepath
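
# Each line in metadata.txt has the form "<filename>|<prompt>|<timestamp>",
# e.g. "20250101_120000_1a2b3c4d.png|Pepe the frog reading a newspaper|20250101_120000".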

def load_generated_images():
    if not os.path.exists(SAVE_DIR):
        return []

    # Collect the image files in the save directory
    image_files = [
        os.path.join(SAVE_DIR, f)
        for f in os.listdir(SAVE_DIR)
        if f.endswith(('.png', '.jpg', '.jpeg', '.webp'))
    ]
    # Sort by creation time, newest first
    image_files.sort(key=lambda x: os.path.getctime(x), reverse=True)
    return image_files
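
# Note: os.path.getctime returns the creation time on Windows but the metadata
# change time on most Unix systems; since these files are written once and not
# modified afterwards, the ordering is effectively newest-first either way.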

def load_predefined_images():
    # No separate predefined example images
    return []

css = """
/* You can add a background gradient, tweak fonts, title sizes, etc. here as you like. */
body {
    font-family: 'Open Sans', sans-serif;
    background: linear-gradient(135deg, #f5f7fa, #c3cfe2);
    margin: 0;   /* remove the default margin */
    padding: 0;
}
.title {
    font-size: 1.8em;
    font-weight: bold;
    text-align: center;
    margin: 20px 0;
}
footer {
    visibility: hidden;
}
"""

def inference(
    prompt: str,
    seed: int,
    randomize_seed: bool,
    width: int,
    height: int,
    guidance_scale: float,
    num_inference_steps: int,
    lora_scale: float,
    progress: gr.Progress = gr.Progress(track_tqdm=True),
):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=device).manual_seed(seed)

    image = pipeline(
        prompt=prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
        joint_attention_kwargs={"scale": lora_scale},
    ).images[0]

    filepath = save_generated_image(image, prompt)
    return image, seed, load_generated_images()
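
# inference() returns (image, seed, gallery items) so that a single event handler
# can update the result preview, the seed slider, and the gallery tab at once.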

# ---------- Example prompts ----------
examples = [
    "Pepe the frog playing fetch with a golden retriever in a sunny park. He wears casual weekend clothes and tosses a bright red frisbee with a goofy grin. The dog leaps gracefully through the air, tail wagging with excitement. The warm afternoon sunlight filters through the trees, creating a humorous meme-like atmosphere. [pepe]",
    "Pepe the frog dressed in full military gear, standing at attention with a standard-issue rifle. His crisp uniform is adorned with cartoonish medals. Other frog soldiers march in formation behind him during a grand meme parade, conveying discipline mixed with comical charm. [pepe]",
    "A medieval Pepe knight in gleaming armor, proudly holding an ornate sword and shield. He stands in front of a majestic castle with a swirling moat. His shield features a cartoon frog crest, and sunlight gleams off his polished armor, adding a humorous yet epic feel. [pepe]",
    "A charismatic Pepe the frog addressing a crowd from a podium. He wears a well-fitted suit and gestures with exaggerated cartoon expressions while speaking. The audience is filled with fellow frog characters holding supportive banners. Cameras capture this grand meme moment. [pepe]",
    "Pepe the frog enjoying a peaceful morning at home, reading a newspaper at his kitchen table. He wears comfy pajamas and sips coffee from a novelty frog mug. Sunlight streams through the window, illuminating a quaint plant on the countertop in this cozy, meme-inspired scene. [pepe]",
    "Businessman Pepe walking confidently through a sleek office lobby, briefcase in hand. He wears a tailored navy suit, and his wide frog eyes convey determination. Floor-to-ceiling windows reveal a bustling cityscape behind him, blending corporate professionalism with meme humor. [pepe]",
]
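
# The trailing "[pepe]" token in each example appears to act as the trigger word
# for the openfree/pepe LoRA adapter; prompts without it may apply the style less strongly.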

# ---------- UI ----------
# Pick any Gradio theme you like; below, the Soft theme is used with primary_hue="emerald".
with gr.Blocks(css=css, theme=gr.themes.Soft(primary_hue="emerald"), analytics_enabled=False) as demo:
    gr.HTML('<div class="title">PEPE Meme Generator</div>')
    gr.HTML("""
        <a href="https://visitorbadge.io/status?path=https%3A%2F%2Fopenfree-pepe.hf.space">
            <img src="https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fopenfree-pepe.hf.space&countColor=%23263759" />
        </a>
    """)

    with gr.Tabs() as tabs:
        with gr.Tab("Generation"):
            with gr.Column():
                with gr.Row():
                    prompt = gr.Text(
                        label="Prompt",
                        show_label=False,
                        max_lines=1,
                        placeholder="Enter your prompt",
                        container=False,
                    )
                    run_button = gr.Button("Run", scale=0)

                result = gr.Image(label="Result", show_label=False)

                with gr.Accordion("Advanced Settings", open=False):
                    seed = gr.Slider(
                        label="Seed",
                        minimum=0,
                        maximum=MAX_SEED,
                        step=1,
                        value=42,
                    )
                    randomize_seed = gr.Checkbox(
                        label="Randomize seed",
                        value=True,
                    )
                    with gr.Row():
                        width = gr.Slider(
                            label="Width",
                            minimum=256,
                            maximum=MAX_IMAGE_SIZE,
                            step=32,
                            value=1024,
                        )
                        height = gr.Slider(
                            label="Height",
                            minimum=256,
                            maximum=MAX_IMAGE_SIZE,
                            step=32,
                            value=768,
                        )
                    with gr.Row():
                        guidance_scale = gr.Slider(
                            label="Guidance scale",
                            minimum=0.0,
                            maximum=10.0,
                            step=0.1,
                            value=3.5,
                        )
                        num_inference_steps = gr.Slider(
                            label="Number of inference steps",
                            minimum=1,
                            maximum=50,
                            step=1,
                            value=30,
                        )
                        lora_scale = gr.Slider(
                            label="LoRA scale",
                            minimum=0.0,
                            maximum=1.0,
                            step=0.1,
                            value=1.0,
                        )

                gr.Examples(
                    examples=examples,
                    inputs=[prompt],
                    outputs=[result, seed],
                )

        with gr.Tab("Gallery"):
            gr.Markdown("### Generated Images Gallery")
            generated_gallery = gr.Gallery(
                label="Generated Images",
                columns=6,
                show_label=False,
                value=load_generated_images(),
                elem_id="generated_gallery",
                height="auto",
            )
            refresh_btn = gr.Button("🔄 Refresh Gallery")

    # Gallery refresh handler
    def refresh_gallery():
        return load_generated_images()

    refresh_btn.click(
        fn=refresh_gallery,
        inputs=None,
        outputs=generated_gallery,
    )

    # Handle the Run button click and prompt submit events
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=inference,
        inputs=[
            prompt,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
            lora_scale,
        ],
        outputs=[result, seed, generated_gallery],
    )

demo.queue()
demo.launch()