import os

import gradio as gr
import spaces
import torch
from diffusers import DiffusionPipeline

# Comma-separated lists of Hub repo ids, supplied via Space environment variables.
# Default to an empty string so a missing variable doesn't raise AttributeError.
model_list = [m.strip() for m in os.environ.get("MODELS", "").split(",") if m.strip()]
# NOTE: lora_list is parsed here but not applied anywhere yet; see the sketch
# at the bottom of this file for one way to wire it in.
lora_list = [l.strip() for l in os.environ.get("LORAS", "").split(",") if l.strip()]

# Load every requested pipeline up front; models that fail to load are simply
# left out of the dropdown below.
models = {}
for model_name in model_list:
    try:
        # Half precision is the usual choice for CUDA inference and roughly
        # halves VRAM use; drop torch_dtype if a model only ships fp32 weights.
        models[model_name] = DiffusionPipeline.from_pretrained(
            model_name, torch_dtype=torch.float16
        ).to("cuda")
    except Exception as e:
        print(f"Error loading model {model_name}: {e}")


@spaces.GPU
def generate_image(model_name, prompt, negative_prompt, num_inference_steps, guidance_scale):
    pipe = models[model_name]
    # Gradio sliders return floats; the scheduler expects an integer step count.
    result = pipe(
        prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=int(num_inference_steps),
        guidance_scale=guidance_scale,
    )
    return result.images[0]


# Build the Gradio UI
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            model_dropdown = gr.Dropdown(
                choices=list(models.keys()),
                # Default to the first model that actually loaded, not the
                # first one requested (which may have failed above).
                value=next(iter(models), None),
                label="Model",
            )
            prompt = gr.Textbox(label="Prompt")
            negative_prompt = gr.Textbox(label="Negative Prompt", value="")
            num_inference_steps = gr.Slider(
                minimum=10, maximum=50, step=1, value=25, label="Number of Inference Steps"
            )
            guidance_scale = gr.Slider(
                minimum=1, maximum=20, step=0.5, value=7.5, label="Guidance Scale"
            )
        with gr.Column():
            output_image = gr.Image(label="Generated Image")
    generate_btn = gr.Button("Generate Image")
    generate_btn.click(
        generate_image,
        inputs=[model_dropdown, prompt, negative_prompt, num_inference_steps, guidance_scale],
        outputs=output_image,
    )

demo.launch()
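# ---------------------------------------------------------------------------
# LORAS is parsed into `lora_list` above but never applied. Below is a minimal
# sketch of one way to use it, assuming each entry is a Hub repo id containing
# LoRA weights compatible with the base pipelines (an assumption; the original
# code does not say how the LoRAs are meant to be used). It would belong right
# after the model-loading loop, before the UI is built:
#
#     for pipe in models.values():
#         for lora_name in lora_list:
#             try:
#                 # load_lora_weights attaches the adapter to the pipeline's
#                 # UNet/text encoder; incompatible pairs raise and are skipped.
#                 pipe.load_lora_weights(lora_name)
#             except Exception as e:
#                 print(f"Error loading LoRA {lora_name}: {e}")
#
# A per-request alternative is to expose lora_list in its own gr.Dropdown and
# call pipe.load_lora_weights(...) inside generate_image before inference.
# ---------------------------------------------------------------------------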