import torch from diffusers import StableDiffusionXLPipeline import numpy as np import gradio as gr import random from compel import Compel, ReturnedEmbeddingsType device = "cuda" if torch.cuda.is_available() else "cpu" if torch.cuda.is_available(): torch.cuda.max_memory_allocated(device=device) pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True) pipe = pipe.to(device) else: pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True) pipe = pipe.to(device) pipe.safety_checker = None pipe.load_lora_weights("artificialguybr/ps1redmond-ps1-game-graphics-lora-for-sdxl", weight_name="PS1Redmond-PS1Game-Playstation1Graphics.safetensors") lora_activation_words = "playstation 1 graphics, PS1 Game, " MAX_SEED = np.iinfo(np.int32).max MAX_IMAGE_SIZE = 1024 def infer(conditioning, pooled, neg_conditioning, neg_pooled, height, width, num_inference_steps, guidance_scale, seed, randomize_seed, lora_weight): if randomize_seed: seed = random.randint(0, MAX_SEED) generator = torch.Generator().manual_seed(seed) image = pipe( prompt_embeds=conditioning, pooled_prompt_embeds=pooled, negative_prompt_embeds=neg_conditioning, negative_pooled_prompt_embeds=neg_pooled, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, generator=generator, cross_attention_kwargs={"scale": lora_weight} ).images[0] return image def get_embeds(prompt, negative_prompt, height, width, num_inference_steps, guidance_scale, seed, randomize_seed, lora_weight): compel = Compel( tokenizer=[pipe.tokenizer, pipe.tokenizer_2] , text_encoder=[pipe.text_encoder, pipe.text_encoder_2], returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED, requires_pooled=[False, True] ) prompt = lora_activation_words + prompt conditioning, pooled = compel(prompt) neg_conditioning, neg_pooled = compel(negative_prompt) image = infer(conditioning, pooled, neg_conditioning, neg_pooled, height, width, num_inference_steps, guidance_scale, seed, randomize_seed, lora_weight) return image css=""" #col-container { margin: 0 auto; max-width: 520px; } """ with gr.Blocks(css=css) as demo: with gr.Column(elem_id="col-container"): gr.Markdown(f""" # Text-to-Image Gradio Template Currently running on {device.upper()}. """) with gr.Row(): prompt = gr.Text( label="Prompt", show_label=False, max_lines=1, placeholder="Enter your prompt", container=False, ) run_button = gr.Button("Run", scale=0) result = gr.Image(label="Result", show_label=False) with gr.Accordion("Advanced Settings", open=False): negative_prompt = gr.Text( label="Negative prompt", max_lines=1, placeholder="Enter a negative prompt", visible=True, ) seed = gr.Slider( label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, ) randomize_seed = gr.Checkbox(label="Randomize seed", value=True) with gr.Row(): width = gr.Slider( label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024, ) height = gr.Slider( label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024, ) with gr.Row(): guidance_scale = gr.Slider( label="Guidance scale", minimum=0.0, maximum=10.0, step=0.1, value=7.5, ) num_inference_steps = gr.Slider( label="Number of inference steps", minimum=1, maximum=100, step=1, value=30, ) with gr.Row(): lora_weight = gr.Slider( label="LoRA weight", minimum=0.0, maximum=5.0, step=0.01, value=1, ) run_button.click( fn = get_embeds, inputs = [prompt, negative_prompt, height, width, num_inference_steps, guidance_scale, seed, randomize_seed, lora_weight], outputs = [result] ) demo.launch(debug=True)