import gradio as gr
import torch
from diffusers import AutoPipelineForText2Image

# List of available models
MODEL_OPTIONS = {
    "Stable Diffusion 1.5": "runwayml/stable-diffusion-v1-5",
    "Stable Diffusion 2.1": "stabilityai/stable-diffusion-2-1",
    "Stable Diffusion XL": "stabilityai/stable-diffusion-xl-base-1.0"
}


def generate_image(
    model_choice,
    lora_url,
    prompt,
    negative_prompt,
    steps,
    width,
    height,
    guidance_scale,
    seed
):
    # Get the selected model ID
    model_id = MODEL_OPTIONS[model_choice]

    # Initialize the pipeline
    pipe = AutoPipelineForText2Image.from_pretrained(
        model_id,
        torch_dtype=torch.float16,
        use_safetensors=True
    ).to("cuda")

    # Load LoRA weights if provided
    if lora_url:
        pipe.load_lora_weights(lora_url)
        pipe.fuse_lora()

    # Set up generator with seed if provided
    generator = None
    if seed:
        generator = torch.Generator(device="cuda").manual_seed(int(seed))

    # Generate image
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=int(steps),
        width=int(width),
        height=int(height),
        guidance_scale=guidance_scale,
        generator=generator
    ).images[0]

    return image


# Gradio UI components
with gr.Blocks() as demo:
    gr.Markdown("## 🎨 Text-to-Image Generation with LoRA")

    with gr.Row():
        with gr.Column():
            model_choice = gr.Dropdown(
                label="Select Base Model",
                choices=list(MODEL_OPTIONS.keys()),
                value="Stable Diffusion XL"
            )
            lora_url = gr.Textbox(
                label="LoRA Repository ID (e.g., 'username/lora-name')",
                placeholder="Optional Hugging Face repository ID"
            )
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="Enter your prompt here..."
            )
            negative_prompt = gr.Textbox(
                label="Negative Prompt",
                placeholder="Enter what to exclude from the image..."
            )
            with gr.Row():
                steps = gr.Number(
                    label="Inference Steps",
                    value=25,
                    precision=0
                )
                guidance_scale = gr.Slider(
                    label="Guidance Scale",
                    minimum=1.0,
                    maximum=20.0,
                    value=7.5
                )
            with gr.Row():
                width = gr.Number(
                    label="Width",
                    value=1024,
                    precision=0
                )
                height = gr.Number(
                    label="Height",
                    value=1024,
                    precision=0
                )
            seed = gr.Number(
                label="Seed (optional)",
                precision=0
            )
            generate_btn = gr.Button("Generate Image", variant="primary")
        with gr.Column():
            output_image = gr.Image(label="Generated Image", height=600)

    generate_btn.click(
        fn=generate_image,
        inputs=[
            model_choice,
            lora_url,
            prompt,
            negative_prompt,
            steps,
            width,
            height,
            guidance_scale,
            seed
        ],
        outputs=output_image
    )

demo.launch()
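

# ---------------------------------------------------------------------------
# Note (not part of the original app): generate_image() above re-creates the
# pipeline on every "Generate" click, which reloads the model weights each
# time. Below is a minimal sketch of one way to avoid that, assuming the same
# diffusers loading API; the name get_pipeline and the module-level cache are
# hypothetical additions, shown for illustration rather than wired into the
# app above.

_PIPELINE_CACHE = {}


def get_pipeline(model_id):
    """Return a cached pipeline for model_id, loading it only on first use."""
    if model_id not in _PIPELINE_CACHE:
        _PIPELINE_CACHE[model_id] = AutoPipelineForText2Image.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            use_safetensors=True
        ).to("cuda")
    return _PIPELINE_CACHE[model_id]

# Usage sketch: inside generate_image(), replace the from_pretrained(...) call
# with `pipe = get_pipeline(model_id)`. Caveat: because fuse_lora() permanently
# merges LoRA weights into the pipeline, a cached pipeline would keep any
# previously fused LoRA, so with caching you would either skip fuse_lora() or
# key the cache on the (model, LoRA) pair instead.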