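# Usage sketch (assumptions: running as a Hugging Face ZeroGPU Space or
# locally with a CUDA GPU; MODELS set in the Space settings or the shell).
# MODELS is a comma-separated list of diffusers model IDs; the ID below is
# illustrative:
#   export MODELS="stabilityai/stable-diffusion-xl-base-1.0"
#   python app.py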
import torch
from diffusers import DiffusionPipeline
import gradio as gr
import os
import spaces
# Parse the comma-separated model list, trimming whitespace around each entry
# (str.strip() must be applied per element, not to the list itself).
model_list = [m.strip() for m in os.environ.get("MODELS", "").split(",") if m.strip()]
lora_list = os.environ.get("LORAS") # Not in use
models = {}
for model_name in model_list:
    try:
        # Load each pipeline once at startup and move it to the GPU; fp16
        # halves GPU memory (drop torch_dtype to keep full fp32 precision).
        models[model_name] = DiffusionPipeline.from_pretrained(model_name, torch_dtype=torch.float16).to("cuda")
    except Exception as e:
        print(f"Error loading model {model_name}: {e}")

@spaces.GPU
def generate_image(model_name, prompt, negative_prompt, num_inference_steps, guidance_scale):
    pipe = models[model_name]
    # Generated images are exposed via the pipeline output's `images` attribute.
    image = pipe(prompt, negative_prompt=negative_prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale).images[0]
    return image
# Create the Gradio blocks
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            # Default to the first successfully loaded model, since entries
            # in model_list may have failed to load above.
            model_dropdown = gr.Dropdown(choices=list(models.keys()), value=next(iter(models), None), label="Model")
            prompt = gr.Textbox(label="Prompt")
            negative_prompt = gr.Textbox(label="Negative Prompt", value="")
            num_inference_steps = gr.Slider(minimum=10, maximum=50, step=1, value=25, label="Number of Inference Steps")
            guidance_scale = gr.Slider(minimum=1, maximum=20, step=0.5, value=7.5, label="Guidance Scale")
        with gr.Column():
            output_image = gr.Image(label="Generated Image")
            generate_btn = gr.Button("Generate Image")

    generate_btn.click(generate_image, inputs=[model_dropdown, prompt, negative_prompt, num_inference_steps, guidance_scale], outputs=output_image)

demo.launch()