import gradio as gr
from diffusers import DiffusionPipeline
import torch

# ---------------------------------------------------------------------------
# Model setup
# ---------------------------------------------------------------------------
# FIX: torch.float16 is not reliably supported on CPU (kernels are missing or
# very slow), so select dtype from the device actually available instead of
# unconditionally loading fp16 weights onto "cpu".
_device = "cuda" if torch.cuda.is_available() else "cpu"
_dtype = torch.float16 if _device == "cuda" else torch.float32

pipeline = DiffusionPipeline.from_pretrained(
    "John6666/t-ponynai3-v6-sdxl",
    torch_dtype=_dtype,
    safety_checker=None,  # Disable safety checker for faster performance
).to(_device)

# Attention slicing trades a little speed for a large memory saving.
pipeline.enable_attention_slicing()


def generate_image(prompt, negative_prompt, progress=gr.Progress()):
    """Generate one image from *prompt*, steering away from *negative_prompt*.

    Progress is reported per denoising step through the pipeline's
    ``callback_on_step_end`` hook, so the Gradio bar tracks the real work.
    (The previous cosmetic ``for`` loop finished *before* generation even
    started, so the bar carried no information.)

    Returns:
        PIL.Image.Image: the first generated image.
    """
    num_inference_steps = 20  # Set number of inference steps

    def _report(pipe, step, timestep, callback_kwargs):
        # `step` is 0-based; report the fraction of completed steps.
        progress((step + 1) / num_inference_steps,
                 desc=f"Step {step + 1}/{num_inference_steps}")
        # diffusers requires the callback to return the kwargs dict.
        return callback_kwargs

    result = pipeline(
        prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=num_inference_steps,
        callback_on_step_end=_report,
    )
    return result.images[0]


# ---------------------------------------------------------------------------
# Gradio UI
# ---------------------------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# Text-to-Image Generator with John6666/t-ponynai3-v6-sdxl models")
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Enter your prompt", placeholder="Describe the image you want to generate")
            negative_prompt = gr.Textbox(label="Enter negative prompt", placeholder="Describe what you want to avoid")
            generate_button = gr.Button("Generate")
        with gr.Column():
            output_image = gr.Image(label="Generated Image")

    # Wire the button to the generator; Gradio injects the Progress tracker.
    generate_button.click(fn=generate_image, inputs=[prompt, negative_prompt], outputs=output_image)

# Launch the Gradio app
demo.launch()