import torch
from diffusers import DiffusionPipeline
import gradio as gr
import os
import spaces

# Load the models outside of the generate_images function
model_list = [model.strip() for model in os.environ.get("MODELS", "").split(",") if model.strip()]
lora_list = [lora.strip() for lora in os.environ.get("LORAS", "").split(",") if lora.strip()]
print(f"Detected {len(model_list)} models and {len(lora_list)} LoRAs.")
models = {}
for model_name in model_list:
    try:
        print(f"\n\nLoading {model_name}...")
        models[model_name] = DiffusionPipeline.from_pretrained(model_name, torch_dtype=torch.float16).to("cuda")
    except Exception as e:
        print(f"Error loading model {model_name}: {e}")
@spaces.GPU  # assumed to be needed for ZeroGPU allocation, since `spaces` is imported but otherwise unused
def generate_images(
    model_name,
    prompt,
    negative_prompt,
    num_inference_steps,
    guidance_scale,
    height,
    width,
    num_images=4,
    progress=gr.Progress(track_tqdm=True)
):
    if prompt is not None and prompt.strip() != "":
        pipe = models.get(model_name)
        if pipe is None:
            return []
        print(f"Prompt is: [ {prompt} ]")
        outputs = []
        for _ in range(num_images):
            output = pipe(
                prompt,
                negative_prompt=negative_prompt,
                num_inference_steps=num_inference_steps,
                guidance_scale=guidance_scale,
                height=height,
                width=width
            )["images"][0]
            outputs.append(output)
        return outputs
    else:
        gr.Warning("Prompt empty!")
        return []
# Create the Gradio blocks
with gr.Blocks(css="style.css", theme='derekzen/stardust') as demo:
    with gr.Row(equal_height=False):
        with gr.Column():
            with gr.Group():
                model_dropdown = gr.Dropdown(choices=list(models.keys()), value=model_list[0] if model_list else None, label="Model")
                prompt = gr.Textbox(label="Prompt")
                generate_btn = gr.Button("Generate Image")
            with gr.Accordion("Advanced", open=False):
                negative_prompt = gr.Textbox(label="Negative Prompt", value="lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
                num_inference_steps = gr.Slider(minimum=10, maximum=50, step=1, value=25, label="Number of Inference Steps")
                guidance_scale = gr.Slider(minimum=1, maximum=20, step=0.5, value=7.5, label="Guidance Scale")
                height = gr.Slider(minimum=1024, maximum=2048, step=256, value=1024, label="Height")
                width = gr.Slider(minimum=1024, maximum=2048, step=256, value=1024, label="Width")
                num_images = gr.Slider(minimum=1, maximum=4, step=1, value=4, label="Number of Images")
        with gr.Column():
            output_gallery = gr.Gallery(label="Generated Images", height=480, scale=1)

    generate_btn.click(generate_images, inputs=[model_dropdown, prompt, negative_prompt, num_inference_steps, guidance_scale, height, width, num_images], outputs=output_gallery)

demo.launch()
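
# Example local launch (repo ID is a placeholder, and `app.py` is assumed to be the Space's entry point):
#   MODELS="stabilityai/stable-diffusion-xl-base-1.0" LORAS="" python app.py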