import gradio as gr
from time import sleep
from diffusers import DiffusionPipeline
from huggingface_hub import hf_hub_download
import torch
import json
import random
import copy

# Download the LoRA metadata file from the LoraTheExplorer Space
lora_list = hf_hub_download(repo_id="multimodalart/LoraTheExplorer", filename="sdxl_loras.json", repo_type="space")

with open(lora_list, "r") as file:
    data = json.load(file)
    sdxl_loras = [
        {
            "image": item["image"]
            if item["image"].startswith("https://")
            else f'https://huggingface.co./spaces/multimodalart/LoraTheExplorer/resolve/main/{item["image"]}',
            "title": item["title"],
            "repo": item["repo"],
            "trigger_word": item["trigger_word"],
            "weights": item["weights"],
            "is_compatible": item["is_compatible"],
            "is_pivotal": item.get("is_pivotal", False),
            "text_embedding_weights": item.get("text_embedding_weights", None),
            "is_nc": item.get("is_nc", False)
        }
        for item in data
    ]

# Download every LoRA weight file once and keep the local path alongside its metadata
saved_names = [
    hf_hub_download(item["repo"], item["weights"]) for item in sdxl_loras
]

for item, saved_name in zip(sdxl_loras, saved_names):
    item["saved_name"] = saved_name

css = '''
#title{text-align:center}
#plus_column{align-self: center}
#plus_button{font-size: 250%; text-align: center;margin-bottom: 44.75px}
.gradio-container{width: 700px !important; margin: 0 auto !important}
#prompt input{width: calc(100% - 160px);border-top-right-radius: 0px;border-bottom-right-radius: 0px;}
#run_button{position:absolute;margin-top: 57px;right: 0;margin-right: 0.8em;border-bottom-left-radius: 0px; border-top-left-radius: 0px;}
'''

#@spaces.GPU
def merge_and_run(prompt, negative_prompt, shuffled_items, lora_1_scale=0.5, lora_2_scale=0.5, progress=gr.Progress(track_tqdm=True)):
    # Load the SDXL base pipeline in fp16 and move it to the GPU
    pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16)
    pipe.to("cuda")

    # Fuse the two selected LoRAs into the pipeline, each with its own scale
    print("Loading LoRAs")
    pipe.load_lora_weights(shuffled_items[0]['saved_name'])
    pipe.fuse_lora(lora_scale=lora_1_scale)
    pipe.load_lora_weights(shuffled_items[1]['saved_name'])
    pipe.fuse_lora(lora_scale=lora_2_scale)

    if negative_prompt == "":
        negative_prompt = None

    print("Running inference")
    image = pipe(prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=25, guidance_scale=7).images[0]
    return image


def get_description(item):
    trigger_word = item["trigger_word"]
    description = f"LoRA trigger word: `{trigger_word}`" if trigger_word else "No trigger word, will be applied automatically"
    return description, trigger_word


def shuffle_images():
    # Pick two random LoRAs that can be fused together
    compatible_items = [item for item in sdxl_loras if item['is_compatible']]
    random.shuffle(compatible_items)
    two_shuffled_items = compatible_items[:2]

    title_1 = gr.update(label=two_shuffled_items[0]['title'], value=two_shuffled_items[0]['image'])
    title_2 = gr.update(label=two_shuffled_items[1]['title'], value=two_shuffled_items[1]['image'])

    description_1, trigger_word_1 = get_description(two_shuffled_items[0])
    description_2, trigger_word_2 = get_description(two_shuffled_items[1])

    prompt_description_1 = gr.update(value=description_1, visible=True)
    prompt_description_2 = gr.update(value=description_2, visible=True)
    prompt = gr.update(value=f"{trigger_word_1} {trigger_word_2}")

    return title_1, prompt_description_1, title_2, prompt_description_2, prompt, two_shuffled_items


with gr.Blocks(css=css) as demo:
    shuffled_items = gr.State()
    title = gr.HTML(
        '''