import os
import time
import random
import logging
from gradio.blocks import postprocess_update_dict
import numpy as np
from typing import Any, Dict, List, Optional, Union
import torch
from PIL import Image
import gradio as gr
from tempfile import NamedTemporaryFile
from diffusers import (
DiffusionPipeline,
AutoencoderTiny,
AutoencoderKL,
AutoPipelineForImage2Image,
FluxPipeline,
FlowMatchEulerDiscreteScheduler,
DPMSolverMultistepScheduler)
from huggingface_hub import (
hf_hub_download,
HfFileSystem,
ModelCard,
snapshot_download)
from diffusers.utils import load_image
from modules.version_info import (
versions_html,
#initialize_cuda,
#release_torch_resources,
#get_torch_info
)
from modules.image_utils import (
change_color,
open_image,
build_prerendered_images_by_quality,
upscale_image,
# lerp_imagemath,
# shrink_and_paste_on_blank,
show_lut,
apply_lut_to_image_path,
multiply_and_blend_images,
alpha_composite_with_control,
crop_and_resize_image,
convert_to_rgba_png,
get_image_from_dict
)
from modules.constants import (
LORA_DETAILS, LORAS as loras, MODELS,
default_lut_example_img,
lut_files,
MAX_SEED,
# lut_folder,cards,
# cards_alternating,
# card_colors,
# card_colors_alternating,
pre_rendered_maps_paths,
PROMPTS,
NEGATIVE_PROMPTS,
TARGET_SIZE,
temp_files,
load_env_vars,
dotenv_path
)
# from modules.excluded_colors import (
# add_color,
# delete_color,
# build_dataframe,
# on_input,
# excluded_color_list,
# on_color_display_select
# )
from modules.misc import (
get_filename,
convert_ratio_to_dimensions,
update_dimensions_on_ratio
)
from modules.lora_details import (
approximate_token_count,
split_prompt_precisely,
upd_prompt_notes_by_index,
get_trigger_words_by_index
)
import spaces
input_image_palette = []
current_prerendered_image = gr.State("./images/Beeuty-1.png")
user_info = {
"username": "guest",
"session_hash": None,
"headers": None,
"client": None,
"query_params": None,
"path_params": None,
"level" : 0
}
# Define a function to handle the login button click and retrieve user information.
def handle_login(request: gr.Request):
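    """Collect user details from the Gradio request and update the login/logout button label.

    Guests are assigned access level 0; any authenticated username gets level 2.
    """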
# Extract user information from the request
user_info = {
"username": request.username,
"session_hash": request.session_hash,
"headers": dict(request.headers),
"client": request.client,
"query_params": dict(request.query_params),
"path_params": dict(request.path_params),
"level" : (0 if request.username == "guest" else 2)
}
return user_info, gr.update(logout_value=f"Logout {user_info['username']} ({user_info['level']})", value=f"Login {user_info['username']} ({user_info['level']})")
#---if workspace = local or colab---
# Authenticate with Hugging Face
# from huggingface_hub import login
# Log in to Hugging Face using the provided token
# hf_token = 'hf-token-authentication'
# login(hf_token)
def calculate_shift(
image_seq_len,
base_seq_len: int = 256,
max_seq_len: int = 4096,
base_shift: float = 0.5,
max_shift: float = 1.16,
):
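    """Linearly interpolate the scheduler shift (mu) for a given image sequence length,
    from base_shift at base_seq_len tokens up to max_shift at max_seq_len tokens."""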
m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
b = base_shift - m * base_seq_len
mu = image_seq_len * m + b
return mu
def retrieve_timesteps(
scheduler,
num_inference_steps: Optional[int] = None,
device: Optional[Union[str, torch.device]] = None,
timesteps: Optional[List[int]] = None,
sigmas: Optional[List[float]] = None,
**kwargs,
):
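    """Configure the scheduler from custom `timesteps` or `sigmas` (mutually exclusive),
    otherwise from `num_inference_steps`, and return (timesteps, num_inference_steps)."""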
if timesteps is not None and sigmas is not None:
raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
if timesteps is not None:
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
timesteps = scheduler.timesteps
num_inference_steps = len(timesteps)
elif sigmas is not None:
scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
timesteps = scheduler.timesteps
num_inference_steps = len(timesteps)
else:
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
timesteps = scheduler.timesteps
return timesteps, num_inference_steps
# FLUX pipeline
@torch.inference_mode()
def flux_pipe_call_that_returns_an_iterable_of_images(
self,
prompt: Union[str, List[str]] = None,
prompt_2: Optional[Union[str, List[str]]] = None,
negative_prompt: Optional[Union[str, List[str]]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 28,
timesteps: List[int] = None,
guidance_scale: float = 3.5,
num_images_per_prompt: Optional[int] = 1,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
joint_attention_kwargs: Optional[Dict[str, Any]] = None,
max_sequence_length: int = 512,
good_vae: Optional[Any] = None,
):
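    """Denoising loop that yields an intermediate PIL preview after every scheduler step,
    decoded with the pipeline's lightweight preview VAE, then decodes the final latents
    with `good_vae` for the full-quality result."""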
height = height or self.default_sample_size * self.vae_scale_factor
width = width or self.default_sample_size * self.vae_scale_factor
self.check_inputs(
prompt,
prompt_2,
height,
width,
prompt_embeds=prompt_embeds,
pooled_prompt_embeds=pooled_prompt_embeds,
max_sequence_length=max_sequence_length,
)
self._guidance_scale = guidance_scale
self._joint_attention_kwargs = joint_attention_kwargs
self._interrupt = False
batch_size = 1 if isinstance(prompt, str) else len(prompt)
device = self._execution_device
lora_scale = joint_attention_kwargs.get("scale", None) if joint_attention_kwargs is not None else None
prompt_embeds, pooled_prompt_embeds, text_ids = self.encode_prompt(
prompt=prompt,
prompt_2=prompt_2,
prompt_embeds=prompt_embeds,
pooled_prompt_embeds=pooled_prompt_embeds,
device=device,
num_images_per_prompt=num_images_per_prompt,
max_sequence_length=max_sequence_length,
lora_scale=lora_scale,
)
num_channels_latents = self.transformer.config.in_channels // 4
latents, latent_image_ids = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
image_seq_len = latents.shape[1]
mu = calculate_shift(
image_seq_len,
self.scheduler.config.base_image_seq_len,
self.scheduler.config.max_image_seq_len,
self.scheduler.config.base_shift,
self.scheduler.config.max_shift,
)
timesteps, num_inference_steps = retrieve_timesteps(
self.scheduler,
num_inference_steps,
device,
timesteps,
sigmas,
mu=mu,
)
self._num_timesteps = len(timesteps)
guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32).expand(latents.shape[0]) if self.transformer.config.guidance_embeds else None
for i, t in enumerate(timesteps):
if self.interrupt:
continue
timestep = t.expand(latents.shape[0]).to(latents.dtype)
print(f"Step {i + 1}/{num_inference_steps} - Timestep: {timestep.item()}\n")
noise_pred = self.transformer(
hidden_states=latents,
timestep=timestep / 1000,
guidance=guidance,
pooled_projections=pooled_prompt_embeds,
encoder_hidden_states=prompt_embeds,
txt_ids=text_ids,
img_ids=latent_image_ids,
joint_attention_kwargs=self.joint_attention_kwargs,
return_dict=False,
)[0]
latents_for_image = self._unpack_latents(latents, height, width, self.vae_scale_factor)
latents_for_image = (latents_for_image / self.vae.config.scaling_factor) + self.vae.config.shift_factor
image = self.vae.decode(latents_for_image, return_dict=False)[0]
yield self.image_processor.postprocess(image, output_type=output_type)[0]
latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
torch.cuda.empty_cache()
latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
latents = (latents / good_vae.config.scaling_factor) + good_vae.config.shift_factor
image = good_vae.decode(latents, return_dict=False)[0]
self.maybe_free_model_hooks()
torch.cuda.empty_cache()
yield self.image_processor.postprocess(image, output_type=output_type)[0]
#--------------------------------------------------Model Initialization-----------------------------------------------------------------------------------------#
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
base_model = "black-forest-labs/FLUX.1-dev"
# TAEF1 is a very tiny autoencoder that uses the same "latent API" as FLUX.1's VAE. It is useful for real-time previewing of the FLUX.1 generation process.
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)
pipe_i2i = AutoPipelineForImage2Image.from_pretrained(base_model,
vae=good_vae,
transformer=pipe.transformer,
text_encoder=pipe.text_encoder,
tokenizer=pipe.tokenizer,
text_encoder_2=pipe.text_encoder_2,
tokenizer_2=pipe.tokenizer_2,
torch_dtype=dtype
)
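# Bind the streaming generator defined above to this pipeline instance so it can be called
# like a regular method: pipe.flux_pipe_call_that_returns_an_iterable_of_images(...).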
pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
class calculateDuration:
def __init__(self, activity_name=""):
self.activity_name = activity_name
def __enter__(self):
self.start_time = time.time()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.end_time = time.time()
self.elapsed_time = self.end_time - self.start_time
if self.activity_name:
print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds")
else:
print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
def update_selection(evt: gr.SelectData, width, height, aspect_ratio):
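    """Handle a LoRA gallery selection: update the prompt placeholder, the selected-LoRA
    markdown, and, when the LoRA defines an "aspect" ratio, the width/height values."""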
selected_lora = loras[evt.index]
new_placeholder = f"Type a prompt for {selected_lora['title']}"
new_aspect_ratio = aspect_ratio
lora_repo = selected_lora["repo"]
updated_text = f"### Selected: [{lora_repo}](https://huggingface.co./{lora_repo}) ✅"
# aspect will now use ratios if implemented, like 16:9, 4:3, 1:1, etc.
if "aspect" in selected_lora:
try:
new_aspect_ratio = selected_lora["aspect"]
width, height = update_dimensions_on_ratio(new_aspect_ratio, height)
except Exception as e:
print(f"\nError in update selection aspect ratios:{e}\nSkipping")
new_aspect_ratio = aspect_ratio
width = width
height = height
return (
gr.update(placeholder=new_placeholder),
updated_text,
evt.index,
width,
height,
new_aspect_ratio,
upd_prompt_notes_by_index(evt.index)
)
@spaces.GPU(duration=120)
def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress):
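    """Stream text-to-image previews from the FLUX pipeline, splitting prompts that exceed
    roughly 76 tokens across the two text encoders."""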
pipe.to("cuda")
generator = torch.Generator(device="cuda").manual_seed(seed)
flash_attention_enabled = torch.backends.cuda.flash_sdp_enabled()
if flash_attention_enabled:
pipe.attn_implementation="flash_attention_2"
    # Optionally compile the transformer (left disabled); FLUX uses a DiT transformer rather than a UNet
    #pipe.transformer = torch.compile(pipe.transformer, mode="reduce-overhead")
pipe.vae.enable_tiling() # For larger resolutions if needed
# Disable unnecessary features
pipe.safety_checker = None
print(f"\nGenerating image with prompt: {prompt_mash}\n")
approx_tokens= approximate_token_count(prompt_mash)
if approx_tokens > 76:
print(f"\nSplitting prompt due to length: {approx_tokens}\n")
prompt, prompt2 = split_prompt_precisely(prompt_mash)
else:
prompt = prompt_mash
prompt2 = None
with calculateDuration("Generating image"):
# Generate image
for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
prompt=prompt,
prompt_2=prompt2,
num_inference_steps=steps,
guidance_scale=cfg_scale,
width=width,
height=height,
generator=generator,
joint_attention_kwargs={"scale": lora_scale},
output_type="pil",
good_vae=good_vae,
):
yield img
def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, lora_scale, seed, progress):
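    """Run the FLUX image-to-image pipeline once and return the final PIL image."""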
generator = torch.Generator(device="cuda").manual_seed(seed)
pipe_i2i.to("cuda")
flash_attention_enabled = torch.backends.cuda.flash_sdp_enabled()
if flash_attention_enabled:
pipe_i2i.attn_implementation="flash_attention_2"
    # Optionally compile the transformer (left disabled); FLUX uses a DiT transformer rather than a UNet
    #pipe_i2i.transformer = torch.compile(pipe_i2i.transformer, mode="reduce-overhead")
    pipe_i2i.vae.enable_tiling()  # For larger resolutions if needed
    # Disable unnecessary features
    pipe_i2i.safety_checker = None
image_input = open_image(image_input_path)
print(f"\nGenerating image with prompt: {prompt_mash} and {image_input_path}\n")
approx_tokens= approximate_token_count(prompt_mash)
if approx_tokens > 76:
print(f"\nSplitting prompt due to length: {approx_tokens}\n")
prompt, prompt2 = split_prompt_precisely(prompt_mash)
else:
prompt = prompt_mash
prompt2 = None
final_image = pipe_i2i(
prompt=prompt,
prompt_2=prompt2,
image=image_input,
strength=image_strength,
num_inference_steps=steps,
guidance_scale=cfg_scale,
width=width,
height=height,
generator=generator,
joint_attention_kwargs={"scale": lora_scale},
output_type="pil",
).images[0]
return final_image
@spaces.GPU(duration=140)
def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, enlarge, use_conditioned_image=False, progress=gr.Progress(track_tqdm=True)):
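    """Main generation entry point: validate the LoRA selection, build the prompt with any
    trigger words, load the LoRA weights, then run text-to-image (streaming previews) or
    image-to-image, optionally upscaling the result toward TARGET_SIZE."""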
if selected_index is None:
raise gr.Error("You must select a LoRA before proceeding.🧨")
print(f"input Image: {image_input}\n")
# handle selecting a conditioned image from the gallery
global current_prerendered_image
conditioned_image=None
if use_conditioned_image:
print(f"Conditioned path: {current_prerendered_image.value}.. converting to RGB\n")
# ensure the conditioned image is an image and not a string, cannot use RGBA
if isinstance(current_prerendered_image.value, str):
conditioned_image = open_image(current_prerendered_image.value).convert("RGB")
image_input = crop_and_resize_image(conditioned_image, width, height)
print(f"Conditioned Image: {image_input.size}.. converted to RGB and resized\n")
selected_lora = loras[selected_index]
lora_path = selected_lora["repo"]
trigger_word = selected_lora["trigger_word"]
if(trigger_word):
if "trigger_position" in selected_lora:
if selected_lora["trigger_position"] == "prepend":
prompt_mash = f"{trigger_word} {prompt}"
else:
prompt_mash = f"{prompt} {trigger_word}"
else:
prompt_mash = f"{trigger_word} {prompt}"
else:
prompt_mash = prompt
with calculateDuration("Unloading LoRA"):
pipe.unload_lora_weights()
pipe_i2i.unload_lora_weights()
#LoRA weights flow
with calculateDuration(f"Loading LoRA weights for {selected_lora['title']}"):
pipe_to_use = pipe_i2i if image_input is not None else pipe
weight_name = selected_lora.get("weights", None)
pipe_to_use.load_lora_weights(
lora_path,
weight_name=weight_name,
low_cpu_mem_usage=True
)
with calculateDuration("Randomizing seed"):
if randomize_seed:
seed = random.randint(0, MAX_SEED)
if(image_input is not None):
print(f"\nGenerating image to image with seed: {seed}\n")
final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, lora_scale, seed, progress)
if enlarge:
upscaled_image = upscale_image(final_image, max(1.0,min((TARGET_SIZE[0]/width),(TARGET_SIZE[1]/height))))
# Save the upscaled image to a temporary file
with NamedTemporaryFile(delete=False, suffix=".png") as tmp_upscaled:
upscaled_image.save(tmp_upscaled.name, format="PNG")
temp_files.append(tmp_upscaled.name)
print(f"Upscaled image saved to {tmp_upscaled.name}")
final_image = tmp_upscaled.name
yield final_image, seed, gr.update(visible=False)
else:
image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress)
final_image = None
step_counter = 0
for image in image_generator:
step_counter+=1
final_image = image
            # HTML snippet rendered in the hidden progress Markdown component while streaming previews
            progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
yield image, seed, gr.update(value=progress_bar, visible=True)
if enlarge:
upscaled_image = upscale_image(final_image, max(1.0,min((TARGET_SIZE[0]/width),(TARGET_SIZE[1]/height))))
# Save the upscaled image to a temporary file
with NamedTemporaryFile(delete=False, suffix=".png") as tmp_upscaled:
upscaled_image.save(tmp_upscaled.name, format="PNG")
temp_files.append(tmp_upscaled.name)
print(f"Upscaled image saved to {tmp_upscaled.name}")
final_image = tmp_upscaled.name
yield final_image, seed, gr.update(value=progress_bar, visible=False)
def get_huggingface_safetensors(link):
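    """Resolve a Hugging Face repo id (user/repo) to (title, repo, safetensors file name,
    trigger word, preview image URL), validating the base model against MODELS."""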
split_link = link.split("/")
if(len(split_link) == 2):
model_card = ModelCard.load(link)
base_model = model_card.data.get("base_model")
print(base_model)
#Allows Both
if base_model not in MODELS:
#if((base_model != "black-forest-labs/FLUX.1-dev") and (base_model != "black-forest-labs/FLUX.1-schnell")):
raise Exception("Flux LoRA Not Found!")
# Only allow "black-forest-labs/FLUX.1-dev"
#if base_model != "black-forest-labs/FLUX.1-dev":
#raise Exception("Only FLUX.1-dev is supported, other LoRA models are not allowed!")
image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
trigger_word = model_card.data.get("instance_prompt", "")
image_url = f"https://huggingface.co./{link}/resolve/main/{image_path}" if image_path else None
    fs = HfFileSystem()
    safetensors_name = None  # stays None if no .safetensors file is found in the repo
try:
list_of_files = fs.ls(link, detail=False)
for file in list_of_files:
if(file.endswith(".safetensors")):
safetensors_name = file.split("/")[-1]
if (not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp"))):
image_elements = file.split("/")
image_url = f"https://huggingface.co./{link}/resolve/main/{image_elements[-1]}"
except Exception as e:
print(e)
gr.Warning(f"You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
raise Exception(f"You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
return split_link[1], link, safetensors_name, trigger_word, image_url
def check_custom_model(link):
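    """Accept either a huggingface.co URL or a bare repo id and delegate to
    get_huggingface_safetensors."""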
if(link.startswith("https://")):
if(link.startswith("https://huggingface.co.") or link.startswith("https://www.huggingface.co")):
link_split = link.split("huggingface.co/")
return get_huggingface_safetensors(link_split[1])
else:
return get_huggingface_safetensors(link)
def add_custom_lora(custom_lora):
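    """Validate a custom LoRA entry, render an info card, and append it to the global
    loras list if it is not already present."""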
global loras
if(custom_lora):
try:
title, repo, path, trigger_word, image = check_custom_model(custom_lora)
print(f"Loaded custom LoRA: {repo}")
            card = f'''
            <div class="custom_lora_card">
                <span>Loaded custom LoRA:</span>
                <h3>{title}</h3>
                <small>{"Using: <code><b>" + trigger_word + "</b></code> as the trigger word" if trigger_word else "No trigger word found. If there's a trigger word, include it in your prompt"}</small>
            </div>
            '''
existing_item_index = next((index for (index, item) in enumerate(loras) if item['repo'] == repo), None)
            if existing_item_index is None:
new_item = {
"image": image,
"title": title,
"repo": repo,
"weights": path,
"trigger_word": trigger_word
}
print(new_item)
existing_item_index = len(loras)
loras.append(new_item)
return gr.update(visible=True, value=card), gr.update(visible=True), gr.Gallery(selected_index=None), f"Custom: {path}", existing_item_index, trigger_word
except Exception as e:
gr.Warning(f"Invalid LoRA: either you entered an invalid link, or a non-FLUX LoRA")
return gr.update(visible=True, value=f"Invalid LoRA: either you entered an invalid link, a non-FLUX LoRA"), gr.update(visible=False), gr.update(), "", None, ""
else:
return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
def remove_custom_lora():
return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
def on_prerendered_gallery_selection(event_data: gr.SelectData):
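    """Store the selected template image path in the module-level current_prerendered_image state."""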
global current_prerendered_image
selected_index = event_data.index
selected_image = pre_rendered_maps_paths[selected_index]
print(f"Gallery Image Selected: {selected_image}\n")
current_prerendered_image.value = selected_image
return current_prerendered_image
def update_prompt_visibility(map_option):
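    """Show the prompt, negative prompt, and notes fields only when the "Prompt" map option is selected."""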
is_visible = (map_option == "Prompt")
return (
gr.update(visible=is_visible),
gr.update(visible=is_visible),
gr.update(visible=is_visible)
)
def replace_input_with_sketch_image(sketch_image):
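    """Extract the drawn image from the Sketchpad value (which may be a dict) and return it."""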
print(f"Sketch Image: {sketch_image}\n")
sketch, is_dict = get_image_from_dict(sketch_image)
return sketch
@spaces.GPU()
def getVersions():
return versions_html()
run_lora.zerogpu = True
gr.set_static_paths(paths=["images/","images/images","images/prerendered","LUT/","fonts/", "assets/"])
title = "Hex Game Maker"
with gr.Blocks(css_paths="style_20250128.css", title=title, theme='Surn/beeuty', delete_cache=(43200, 43200), head_paths="head.htm") as app:
with gr.Row():
gr.Markdown("""
# Hex Game Maker Development Features
## This project includes features that did not make it into the main project! ⬢""", elem_classes="intro")
with gr.Row():
with gr.Accordion("Welcome to Hex Game Maker, the ultimate tool for transforming your images into stunning hexagon grid artworks. Whether you're a tabletop game enthusiast, a digital artist, or someone who loves unique patterns, Hex Game Maker has something for you.", open=False, elem_classes="intro"):
gr.Markdown ("""
## Drop an image into the Input Image and get started!
## What is Hex Game Maker?
Hex Game Maker is a web-based application that allows you to apply a hexagon grid overlay to any image. You can customize the size, color, and opacity of the hexagons, as well as the background and border colors. The result is a visually striking image that looks like it was made from hexagonal tiles!
### What Can You Do?
- **Generate Hexagon Grids:** Create beautiful hexagon grid overlays on any image with fully customizable parameters.
- **AI-Powered Image Generation:** Use advanced AI models to generate images based on your prompts and apply hexagon grids to them.
- **Color Exclusion:** Select and exclude specific colors from your hexagon grid for a cleaner and more refined look.
- **Interactive Customization:** Adjust hexagon size, border size, rotation, background color, and more in real-time.
- **Depth and 3D Model Generation:** Generate depth maps and 3D models from your images for enhanced visualization.
- **Image Filter [Look-Up Table (LUT)] Application:** Apply filters (LUTs) to your images for color grading and enhancement.
- **Pre-rendered Maps:** Access a library of pre-rendered hexagon maps for quick and easy customization.
- **Add Margins:** Add customizable margins around your images for a polished finish.
### Why You'll Love It
- **Fun and Easy to Use:** With an intuitive interface and real-time previews, creating hexagon grids has never been this fun!
- **Endless Creativity:** Unleash your creativity with endless customization options and see your images transform in unique ways.
- **Hexagon-Inspired Theme:** Enjoy a delightful yellow and purple theme inspired by hexagons! ⬢
- **Advanced AI Models:** Leverage advanced AI models and LoRA weights for high-quality image generation and customization.
### Get Started
1. **Upload or Generate an Image:** Start by uploading your own image or generate one using our AI-powered tool.
2. **Customize Your Grid:** Play around with the settings to create the perfect hexagon grid overlay.
3. **Download and Share:** Once you're happy with your creation, download it and share it with the world!
### Advanced Features
- **Generative AI Integration:** Utilize models like `black-forest-labs/FLUX.1-dev` and various LoRA weights for generating unique images.
- **Pre-rendered Maps:** Access a library of pre-rendered hexagon maps for quick and easy customization.
- **Image Filter [Look-Up Table (LUT)] Application:** Apply filters (LUTs) to your images for color grading and enhancement.
- **Depth and 3D Model Generation:** Create depth maps and 3D models from your images for enhanced visualization.
- **Add Margins:** Customize margins around your images for a polished finish.
Join the hive and start creating with Hex Game Maker today!
""", elem_classes="intro")
selected_index = gr.State(None)
with gr.Row():
with gr.Column(scale=2):
progress_bar = gr.Markdown(elem_id="progress",visible=False)
input_image = gr.Image(
label="Input Image",
type="filepath",
interactive=True,
elem_classes="centered solid imgcontainer",
key="imgInput",
image_mode="RGB",
format="PNG"
)
def on_input_image_change(image_path):
if image_path is None:
gr.Warning("Please upload an Input Image to get started.")
return None
img, img_path = convert_to_rgba_png(image_path)
return img_path
input_image.input(
fn=on_input_image_change,
inputs=[input_image],
outputs=[input_image], scroll_to_output=True,
)
with gr.Column(scale=0):
with gr.Accordion("Sketch Pad (WIP)", open = False):
with gr.Row():
sketch_image = gr.Sketchpad(
label="Sketch Image",
type="filepath",
#invert_colors=True,
#source=['upload','canvas'],
#tool=['editor','select','color-sketch'],
placeholder="Draw a sketch or upload an image. Currently broken in gradio 5.17.1",
interactive=True,
elem_classes="centered solid imgcontainer",
key="imgSketch",
image_mode="RGB",
format="PNG",
width=512, # Default width
height=512 # Default height
)
with gr.Row():
with gr.Column(scale=1):
sketch_replace_input_image_button = gr.Button(
"Replace Input Image with sketch",
elem_id="sketch_replace_input_image_button",
elem_classes="solid"
)
with gr.Column(scale=2):
alpha_composite_slider = gr.Slider(0,100,50,0.5, label="Alpha Composite Sketch to Input Image", elem_id="alpha_composite_slider")
with gr.Accordion("Image Filters", open = False):
with gr.Row():
with gr.Column():
composite_color = gr.ColorPicker(label="Color", value="#ede9ac44")
composite_opacity = gr.Slider(label="Opacity %", minimum=0, maximum=100, value=50, interactive=True)
with gr.Row():
composite_button = gr.Button("Composite", elem_classes="solid")
with gr.Row():
with gr.Column():
lut_filename = gr.Textbox(
value="",
label="Look Up Table (LUT) File Name",
elem_id="lutFileName")
with gr.Column():
lut_file = gr.File(
value=None,
file_count="single",
file_types=[".cube"],
type="filepath",
label="LUT cube File")
with gr.Row():
lut_example_image = gr.Image(type="pil", label="Filter (LUT) Example Image", value=default_lut_example_img)
with gr.Row():
with gr.Column():
gr.Markdown("""
### Included Filters (LUTs)
There are several included Filters:
Try them on the example image before applying to your Input Image.
""", elem_id="lut_markdown")
with gr.Column():
gr.Examples(elem_id="lut_examples",
examples=[[f] for f in lut_files],
inputs=[lut_filename],
outputs=[lut_filename],
label="Select a Filter (LUT) file. Populate the LUT File Name field"
)
with gr.Row():
apply_lut_button = gr.Button("Apply Filter (LUT)", elem_classes="solid", elem_id="apply_lut_button")
lut_file.change(get_filename, inputs=[lut_file], outputs=[lut_filename])
lut_filename.change(show_lut, inputs=[lut_filename, lut_example_image], outputs=[lut_example_image])
apply_lut_button.click(
lambda lut_filename, input_image: gr.Warning("Please upload an Input Image to get started.") if input_image is None else apply_lut_to_image_path(lut_filename, input_image)[0],
inputs=[lut_filename, input_image],
outputs=[input_image],
scroll_to_output=True
)
with gr.Row():
with gr.Accordion("Generative AI", open = True ):
with gr.Column():
map_options = gr.Dropdown(
label="Map Options*",
choices=list(PROMPTS.keys()),
value="Alien Landscape",
elem_classes="solid",
scale=0
)
prompt = gr.Textbox(
label="Prompt",
visible=False,
elem_classes="solid",
value="top-down, (rectangular tabletop_map) alien planet map, Battletech_boardgame scifi world with forests, lakes, oceans, continents and snow at the top and bottom, (middle is dark, no_reflections, no_shadows), from directly above. From 100,000 feet looking straight down",
lines=4
)
negative_prompt_textbox = gr.Textbox(
label="Negative Prompt",
visible=False,
elem_classes="solid",
value="Earth, low quality, bad anatomy, blurry, cropped, worst quality, shadows, people, humans, reflections, shadows, realistic map of the Earth, isometric, text"
)
prompt_notes_label = gr.Label(
"Choose a LoRa style or add an image. YOU MUST CLEAR THE IMAGE TO START OVER ",
elem_classes="solid centered small",
show_label=False,
visible=False
)
# Keep the change event to maintain functionality
map_options.change(
fn=update_prompt_visibility,
inputs=[map_options],
outputs=[prompt, negative_prompt_textbox, prompt_notes_label]
)
with gr.Row():
with gr.Column(scale=1):
generate_button = gr.Button("Generate From Map Options, Input Image and LoRa Style", variant="primary", elem_id="gen_btn")
with gr.Accordion("LoRA Styles*", open=False):
selected_info = gr.Markdown("")
lora_gallery = gr.Gallery(
[(item["image"], item["title"]) for item in loras],
label="LoRA Styles",
allow_preview=False,
columns=3,
elem_id="lora_gallery",
show_share_button=False
)
with gr.Accordion("Custom LoRA", open=False):
with gr.Group():
custom_lora = gr.Textbox(label="Enter Custom LoRA. **NOT TESTED**", placeholder="prithivMLmods/Canopus-LoRA-Flux-Anime")
gr.Markdown("[Check the list of FLUX LoRA's](https://huggingface.co./models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)", elem_id="lora_list")
custom_lora_info = gr.HTML(visible=False)
custom_lora_button = gr.Button("Remove custom LoRA", visible=False)
with gr.Column(scale=2):
generate_input_image_from_gallery = gr.Button(
"Generate AI Image from Template Image",
elem_id="generate_input_image_from_gallery",
elem_classes="solid",
variant="primary"
)
with gr.Accordion("Template Images", open = False):
with gr.Row():
with gr.Column(scale=1):
# Gallery from PRE_RENDERED_IMAGES GOES HERE
prerendered_image_gallery = gr.Gallery(label="Template Gallery", show_label=True, value=build_prerendered_images_by_quality(3,'thumbnail'), elem_id="gallery", elem_classes="solid", type="filepath", columns=[3], rows=[3], preview=False ,object_fit="contain", height="auto", format="png",allow_preview=False)
with gr.Column(scale=1):
# def handle_login(request: gr.Request):
# # Extract user information from the request
# user_info = {
# "username": request.username,
# "session_hash": request.session_hash,
# "headers": dict(request.headers),
# "client": request.client,
# "query_params": dict(request.query_params),
# "path_params": dict(request.path_params)
# }
# print(f"\n{user_info}\n")
# return user_info
replace_input_image_button = gr.Button(
"Replace Input Image",
elem_id="prerendered_replace_input_image_button",
elem_classes="solid"
)
# login_button = gr.LoginButton()
# user_info_output = gr.JSON(label="User Information")
# login_button.click(fn=handle_login, inputs=[], outputs=user_info_output)
with gr.Row():
with gr.Accordion("Advanced Settings", open=False):
with gr.Row():
image_strength = gr.Slider(label="Image Guidance Strength (prompt percentage)", info="Lower means more image influence", minimum=0.1, maximum=1.0, step=0.01, value=0.85)
with gr.Column():
with gr.Row():
cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=5.0)
steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=30)
with gr.Row():
negative_prompt_textbox = gr.Textbox(
label="Negative Prompt",
visible=False,
elem_classes="solid",
value="Earth, low quality, bad anatomy, blurry, cropped, worst quality, shadows, people, humans, reflections, shadows, realistic map of the Earth, isometric, text"
)
# Add Dropdown for sizing of Images, height and width based on selection. Options are 16x9, 16x10, 4x5, 1x1
# The values of height and width are based on common resolutions for each aspect ratio
# Default to 16x9, 1024x576
image_size_ratio = gr.Dropdown(label="Image Aspect Ratio", choices=["16:9", "16:10", "4:5", "4:3", "2:1","3:2","1:1", "9:16", "10:16", "5:4", "3:4","1:2", "2:3"], value="16:9", elem_classes="solid", type="value", scale=0, interactive=True)
width = gr.Slider(label="Width", minimum=256, maximum=2560, step=16, value=1024, interactive=False)
height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=512)
enlarge_to_default = gr.Checkbox(label="Auto Enlarge to Default Size", value=False)
image_size_ratio.change(
fn=update_dimensions_on_ratio,
inputs=[image_size_ratio, height],
outputs=[width, height]
)
height.change(
fn=lambda *args: update_dimensions_on_ratio(*args)[0],
inputs=[image_size_ratio, height],
outputs=[width]
)
with gr.Row():
randomize_seed = gr.Checkbox(False, label="Randomize seed",elem_id="rnd_seed_chk")
seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True, elem_id="rnd_seed")
lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=3, step=0.01, value=1.01)
with gr.Row():
login_button = gr.LoginButton(logout_value=f"Logout({user_info['username']} ({user_info['level']}))", size="md", elem_classes="solid centered", elem_id="hf_login_btn", icon="./assets/favicon.ico")
# Create a JSON component to display the user information
user_info_output = gr.JSON(label="User Information:")
# Set up the event listener for the login button click
login_button.click(fn=handle_login, inputs=[], outputs=[user_info_output, login_button])
with gr.Row():
gr.HTML(value=getVersions(), visible=True, elem_id="versions")
# Event Handlers
composite_button.click(
fn=lambda input_image, composite_color, composite_opacity: gr.Warning("Please upload an Input Image to get started.") if input_image is None else change_color(input_image, composite_color, composite_opacity),
inputs=[input_image, composite_color, composite_opacity],
outputs=[input_image]
)
#use conditioned_image as the input_image for generate_input_image_click
generate_input_image_from_gallery.click(
fn=run_lora,
inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, enlarge_to_default, gr.State(True)],
outputs=[input_image, seed, progress_bar], scroll_to_output=True
)
prerendered_image_gallery.select(
fn=on_prerendered_gallery_selection,
inputs=None,
outputs=gr.State(current_prerendered_image), # Update the state with the selected image
show_api=False, scroll_to_output=True
)
alpha_composite_slider.change(
fn=alpha_composite_with_control,
inputs=[input_image, sketch_image, alpha_composite_slider],
outputs=[input_image],
scroll_to_output=True
)
sketch_replace_input_image_button.click(
lambda sketch_image: replace_input_with_sketch_image(sketch_image),
inputs=[sketch_image],
outputs=[input_image], scroll_to_output=True
)
# replace input image with selected prerendered image gallery selection
replace_input_image_button.click(
lambda: current_prerendered_image.value,
inputs=None,
outputs=[input_image], scroll_to_output=True
)
lora_gallery.select(
update_selection,
inputs=[width, height, image_size_ratio],
outputs=[prompt, selected_info, selected_index, width, height, image_size_ratio, prompt_notes_label]
)
custom_lora.input(
add_custom_lora,
inputs=[custom_lora],
outputs=[custom_lora_info, custom_lora_button, lora_gallery, selected_info, selected_index, prompt]
)
custom_lora_button.click(
remove_custom_lora,
outputs=[custom_lora_info, custom_lora_button, lora_gallery, selected_info, selected_index, custom_lora]
)
gr.on(
triggers=[generate_button.click, prompt.submit],
fn=run_lora,
inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, enlarge_to_default, gr.State(False)],
outputs=[input_image, seed, progress_bar]
)
load_env_vars(dotenv_path)
logging.basicConfig(
format="[%(levelname)s] %(asctime)s %(message)s", level=logging.INFO
)
logging.info("Environment Variables: %s" % os.environ)
app.queue()
app.launch(allowed_paths=["assets","/","./assets","images","./images", "./images/prerendered"], favicon_path="./assets/favicon.ico", max_file_size="10mb")