Surn committed
Commit 6dd859c · 1 Parent(s): ab4cf94

Reverse Changes

app.py CHANGED
@@ -1,5 +1,7 @@
1
  import os
2
  # Import constants
3
  import utils.constants as constants
4
  import gradio as gr
5
  from PIL import Image
@@ -10,6 +12,9 @@ from tempfile import NamedTemporaryFile
10
  import atexit
11
  import random
12
  import logging
13
 
14
  IS_SHARED_SPACE = constants.IS_SHARED_SPACE
15
 
@@ -57,13 +62,25 @@ from utils.excluded_colors import (
57
  # )
58
  from utils.version_info import (
59
  versions_html,
60
- initialize_cuda,
61
- release_torch_resources,
62
  get_torch_info
63
  )
64
  from utils.lora_details import (
65
- upd_prompt_notes
66
  )
67
 
68
  input_image_palette = []
69
  current_prerendered_image = gr.State("./images/images/Beeuty-1.png")
@@ -162,324 +179,313 @@ def get_model_and_lora(model_textbox):
162
  default_model = model_textbox
163
  return default_model, []
164
 
165
-
166
- def generate_input_image_click(map_option, prompt_textbox_value, negative_prompt_textbox_value, model_textbox_value, randomize_seed=True, seed=None, use_conditioned_image=False, strength=0.5, image_format="16:9", scale_factor=(8/3), progress=gr.Progress(track_tqdm=True)):
167
- import spaces
168
- from diffusers import FluxPipeline,FluxImg2ImgPipeline,FluxControlPipeline
169
- import accelerate
170
- from transformers import AutoTokenizer
171
- from utils.lora_details import get_trigger_words, approximate_token_count, split_prompt_precisely
172
- import gc
173
- PIPELINE_CLASSES = {
174
- "FluxPipeline": FluxPipeline,
175
- "FluxImg2ImgPipeline": FluxImg2ImgPipeline,
176
- "FluxControlPipeline": FluxControlPipeline
177
- }
178
- if randomize_seed:
179
- seed = random.randint(0, constants.MAX_SEED)
180
-
181
- @spaces.GPU(progress=gr.Progress(track_tqdm=True))
182
- def generate_image_lowmem(
183
- text,
184
- neg_prompt=None,
185
- model_name="black-forest-labs/FLUX.1-dev",
186
- lora_weights=None,
187
- conditioned_image=None,
188
- image_width=1368,
189
- image_height=848,
190
- guidance_scale=3.5,
191
- num_inference_steps=30,
192
- seed=0,
193
- true_cfg_scale=1.0,
194
- pipeline_name="FluxPipeline",
195
- strength=0.75,
196
- additional_parameters=None,
197
- progress=gr.Progress(track_tqdm=True)
198
- ):
199
- from torch import cuda, bfloat16, float32, Generator, no_grad, backends
200
- # Retrieve the pipeline class from the mapping
201
- pipeline_class = PIPELINE_CLASSES.get(pipeline_name)
202
- if not pipeline_class:
203
- raise ValueError(f"Unsupported pipeline type '{pipeline_name}'. "
204
- f"Available options: {list(PIPELINE_CLASSES.keys())}")
205
-
206
- #initialize_cuda()
207
- device = "cuda" if cuda.is_available() else "cpu"
208
- from src.condition import Condition
209
-
210
- print(f"device:{device}\nmodel_name:{model_name}\nlora_weights:{lora_weights}\n")
211
- print(f"\n {get_torch_info()}\n")
212
- # Disable gradient calculations
213
- with no_grad():
214
- # Initialize the pipeline inside the context manager
215
- pipe = pipeline_class.from_pretrained(
216
- model_name,
217
- torch_dtype=bfloat16 if device == "cuda" else float32
218
- ).to(device)
219
- # Optionally, don't use CPU offload if not necessary
220
 
221
- # alternative version that may be more efficient
222
- # pipe.enable_sequential_cpu_offload()
223
- if pipeline_name == "FluxPipeline":
224
- pipe.enable_model_cpu_offload()
225
- pipe.vae.enable_slicing()
226
- pipe.vae.enable_tiling()
227
- else:
228
- pipe.enable_model_cpu_offload()
229
-
230
- # Access the tokenizer from the pipeline
231
- tokenizer = pipe.tokenizer
232
-
233
- # Check if add_prefix_space is set and convert to slow tokenizer if necessary
234
- if getattr(tokenizer, 'add_prefix_space', False):
235
- tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False, device_map = 'cpu')
236
- # Update the pipeline's tokenizer
237
- pipe.tokenizer = tokenizer
238
- pipe.to(device)
239
-
240
- flash_attention_enabled = backends.cuda.flash_sdp_enabled()
241
- if flash_attention_enabled == False:
242
- #Enable xFormers memory-efficient attention (optional)
243
- #pipe.enable_xformers_memory_efficient_attention()
244
- print("\nEnabled xFormers memory-efficient attention.\n")
245
- else:
246
- pipe.attn_implementation="flash_attention_2"
247
- print("\nEnabled flash_attention_2.\n")
248
-
249
- condition_type = "subject"
250
- # Load LoRA weights
251
- # note: does not yet handle multiple LoRA weights with different names, needs .set_adapters(["depth", "hyper-sd"], adapter_weights=[0.85, 0.125])
252
- if lora_weights:
253
- for lora_weight in lora_weights:
254
- lora_configs = constants.LORA_DETAILS.get(lora_weight, [])
255
- lora_weight_set = False
256
- if lora_configs:
257
- for config in lora_configs:
258
- # Load LoRA weights with optional weight_name and adapter_name
259
- if 'weight_name' in config:
260
- weight_name = config.get("weight_name")
261
- adapter_name = config.get("adapter_name")
262
- lora_collection = config.get("lora_collection")
263
- if weight_name and adapter_name and lora_collection and lora_weight_set == False:
264
- pipe.load_lora_weights(
265
- lora_collection,
266
- weight_name=weight_name,
267
- adapter_name=adapter_name,
268
- token=constants.HF_API_TOKEN
269
- )
270
- lora_weight_set = True
271
- print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}, lora_collection={lora_collection}\n")
272
- elif weight_name and adapter_name==None and lora_collection and lora_weight_set == False:
273
- pipe.load_lora_weights(
274
- lora_collection,
275
- weight_name=weight_name,
276
- token=constants.HF_API_TOKEN
277
- )
278
- lora_weight_set = True
279
- print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}, lora_collection={lora_collection}\n")
280
- elif weight_name and adapter_name and lora_weight_set == False:
281
- pipe.load_lora_weights(
282
- lora_weight,
283
- weight_name=weight_name,
284
- adapter_name=adapter_name,
285
- token=constants.HF_API_TOKEN
286
- )
287
- lora_weight_set = True
288
- print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}\n")
289
- elif weight_name and adapter_name==None and lora_weight_set == False:
290
- pipe.load_lora_weights(
291
- lora_weight,
292
- weight_name=weight_name,
293
- token=constants.HF_API_TOKEN
294
- )
295
- lora_weight_set = True
296
- print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}\n")
297
- elif lora_weight_set == False:
298
- pipe.load_lora_weights(
299
- lora_weight,
300
- token=constants.HF_API_TOKEN
301
- )
302
- lora_weight_set = True
303
- print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}\n")
304
- # Apply 'pipe' configurations if present
305
- if 'pipe' in config:
306
- pipe_config = config['pipe']
307
- for method_name, params in pipe_config.items():
308
- method = getattr(pipe, method_name, None)
309
- if method:
310
- print(f"Applying pipe method: {method_name} with params: {params}")
311
- method(**params)
312
- else:
313
- print(f"Method {method_name} not found in pipe.")
314
- if 'condition_type' in config:
315
- condition_type = config['condition_type']
316
- if condition_type == "coloring":
317
- #pipe.enable_coloring()
318
- print("\nEnabled coloring.\n")
319
- elif condition_type == "deblurring":
320
- #pipe.enable_deblurring()
321
- print("\nEnabled deblurring.\n")
322
- elif condition_type == "fill":
323
- #pipe.enable_fill()
324
- print("\nEnabled fill.\n")
325
- elif condition_type == "depth":
326
- #pipe.enable_depth()
327
- print("\nEnabled depth.\n")
328
- elif condition_type == "canny":
329
- #pipe.enable_canny()
330
- print("\nEnabled canny.\n")
331
- elif condition_type == "subject":
332
- #pipe.enable_subject()
333
- print("\nEnabled subject.\n")
334
  else:
335
- print(f"Condition type {condition_type} not implemented.")
336
- else:
337
- pipe.load_lora_weights(lora_weight, use_auth_token=constants.HF_API_TOKEN)
338
- # Set the random seed for reproducibility
339
- generator = Generator(device=device).manual_seed(seed)
340
- conditions = []
341
- if conditioned_image is not None:
342
- conditioned_image = crop_and_resize_image(conditioned_image, image_width, image_height)
343
- condition = Condition(condition_type, conditioned_image)
344
- conditions.append(condition)
345
- print(f"\nAdded conditioned image.\n {conditioned_image.size}")
346
- # Prepare the parameters for image generation
347
- additional_parameters ={
348
- "strength": strength,
349
- "image": conditioned_image,
350
- }
351
- else:
352
- print("\nNo conditioned image provided.")
353
- if neg_prompt!=None:
354
- true_cfg_scale=1.1
355
- additional_parameters ={
356
- "negative_prompt": neg_prompt,
357
- "true_cfg_scale": true_cfg_scale,
358
- }
359
- # handle long prompts by splitting them
360
- if approximate_token_count(text) > 76:
361
- prompt, prompt2 = split_prompt_precisely(text)
362
- prompt_parameters = {
363
- "prompt" : prompt,
364
- "prompt_2": prompt2
365
- }
366
- else:
367
- prompt_parameters = {
368
- "prompt" :text
369
  }
370
- additional_parameters.update(prompt_parameters)
371
- # Combine all parameters
372
- generate_params = {
373
- "height": image_height,
374
- "width": image_width,
375
- "guidance_scale": guidance_scale,
376
- "num_inference_steps": num_inference_steps,
377
- "generator": generator, }
378
- if additional_parameters:
379
- generate_params.update(additional_parameters)
380
- generate_params = {k: v for k, v in generate_params.items() if v is not None}
381
- print(f"generate_params: {generate_params}")
382
- # Generate the image
383
- result = pipe(**generate_params)
384
- image = result.images[0]
385
- # Clean up
386
- del result
387
- del conditions
388
- del generator
389
- # Delete the pipeline and clear cache
390
- del pipe
391
- cuda.empty_cache()
392
- cuda.ipc_collect()
393
- print(cuda.memory_summary(device=None, abbreviated=False))
394
 
395
- return image
396
-
397
- #@spaces.GPU(progress=gr.Progress(track_tqdm=True))
398
- def generate_ai_image_local (
399
- map_option,
400
- prompt_textbox_value,
401
- neg_prompt_textbox_value,
402
- model="black-forest-labs/FLUX.1-dev",
403
- lora_weights=None,
404
- conditioned_image=None,
405
- height=512,
406
- width=912,
407
- num_inference_steps=30,
408
- guidance_scale=3.5,
409
- seed=777,
410
- pipeline_name="FluxPipeline",
411
- strength=0.75,
412
- progress=gr.Progress(track_tqdm=True)
413
- ):
414
- print(f"Generating image with lowmem")
415
- try:
416
- if map_option != "Prompt":
417
- prompt = constants.PROMPTS[map_option]
418
- negative_prompt = constants.NEGATIVE_PROMPTS.get(map_option, "")
419
- else:
420
- prompt = prompt_textbox_value
421
- negative_prompt = neg_prompt_textbox_value or ""
422
- #full_prompt = f"{prompt} {negative_prompt}"
423
- additional_parameters = {}
424
- if lora_weights:
425
- for lora_weight in lora_weights:
426
- lora_configs = constants.LORA_DETAILS.get(lora_weight, [])
427
- for config in lora_configs:
428
- if 'parameters' in config:
429
- additional_parameters.update(config['parameters'])
430
- elif 'trigger_words' in config:
431
- trigger_words = get_trigger_words(lora_weight)
432
- prompt = f"{trigger_words} {prompt}"
433
- for key, value in additional_parameters.items():
434
- if key in ['height', 'width', 'num_inference_steps', 'max_sequence_length']:
435
- additional_parameters[key] = int(value)
436
- elif key in ['guidance_scale','true_cfg_scale']:
437
- additional_parameters[key] = float(value)
438
- height = additional_parameters.pop('height', height)
439
- width = additional_parameters.pop('width', width)
440
- num_inference_steps = additional_parameters.pop('num_inference_steps', num_inference_steps)
441
- guidance_scale = additional_parameters.pop('guidance_scale', guidance_scale)
442
- print("Generating image with the following parameters:")
443
- print(f"Model: {model}")
444
- print(f"LoRA Weights: {lora_weights}")
445
- print(f"Prompt: {prompt}")
446
- print(f"Neg Prompt: {negative_prompt}")
447
- print(f"Height: {height}")
448
- print(f"Width: {width}")
449
- print(f"Number of Inference Steps: {num_inference_steps}")
450
- print(f"Guidance Scale: {guidance_scale}")
451
- print(f"Seed: {seed}")
452
- print(f"Additional Parameters: {additional_parameters}")
453
- print(f"Conditioned Image: {conditioned_image}")
454
- print(f"Conditioned Image Strength: {strength}")
455
- print(f"pipeline: {pipeline_name}")
456
- image = generate_image_lowmem(
457
- text=prompt,
458
- model_name=model,
459
- neg_prompt=negative_prompt,
460
- lora_weights=lora_weights,
461
- conditioned_image=conditioned_image,
462
- image_width=width,
463
- image_height=height,
464
- guidance_scale=guidance_scale,
465
- num_inference_steps=num_inference_steps,
466
- seed=seed,
467
- pipeline_name=pipeline_name,
468
- strength=strength,
469
- additional_parameters=additional_parameters
470
- )
471
- with NamedTemporaryFile(delete=False, suffix=".png") as tmp:
472
- image.save(tmp.name, format="PNG")
473
- constants.temp_files.append(tmp.name)
474
- print(f"Image saved to {tmp.name}")
475
- release_torch_resources()
476
- gc.collect()
477
- return tmp.name
478
- except Exception as e:
479
- print(f"Error generating AI image: {e}")
480
- release_torch_resources()
481
  gc.collect()
482
- return None
483
  # Get the model and LoRA weights
484
  model, lora_weights = get_model_and_lora(model_textbox_value)
485
  global current_prerendered_image
@@ -565,7 +571,12 @@ def add_border(image, mask_width, mask_height, blank_color):
565
  margin_color = detect_color_format(blank_color)
566
  print(f"Adding border to image with width: {mask_width}, height: {mask_height}, color: {margin_color}")
567
  return shrink_and_paste_on_blank(bordered_image_output, mask_width, mask_height, margin_color)
568
- def main(debug):
569
  title = "HexaGrid Creator"
570
  #description = "Customizable Hexagon Grid Image Generator"
571
  examples = [["assets//examples//hex_map_p1.png", 32, 1, 0, 0, 0, 0, 0, "#ede9ac44","#12165380", True]]
@@ -791,7 +802,7 @@ def main(debug):
791
  # Gallery from PRE_RENDERED_IMAGES GOES HERE
792
  prerendered_image_gallery = gr.Gallery(label="Image Gallery", show_label=True, value=build_prerendered_images(constants.pre_rendered_maps_paths), elem_id="gallery", elem_classes="solid", type="filepath", columns=[3], rows=[3], preview=False ,object_fit="contain", height="auto", format="png",allow_preview=False)
793
  with gr.Row():
794
- image_guidance_stength = gr.Slider(label="Image Guidance Strength", minimum=0, maximum=1.0, value=0.25, step=0.01, interactive=True)
795
  with gr.Column():
796
  replace_input_image_button = gr.Button(
797
  "Replace Input Image",
@@ -888,35 +899,35 @@ def main(debug):
888
  ],
889
  inputs=[input_image, filter_color, fill_hex, start_x, start_y, end_x, end_y, x_spacing, y_spacing, hex_size, rotation, border_size, border_color, border_opacity],
890
  elem_id="examples")
891
- #with gr.Row():
892
- #gr.HTML(value=versions_html(), visible=True, elem_id="versions")
893
  with gr.Row():
894
- reinstall_torch = gr.Button("Reinstall Torch", elem_classes="solid small", variant="secondary")
895
- reinstall_cuda_toolkit = gr.Button("Install CUDA Toolkit", elem_classes="solid small", variant="secondary")
896
- reinitialize_cuda = gr.Button("Reinitialize CUDA", elem_classes="solid small", variant="secondary")
897
- torch_release = gr.Button("Release Torch Resources", elem_classes="solid small", variant="secondary")
898
-
899
- reinitialize_cuda.click(
900
- fn=initialize_cuda,
901
- inputs=[],
902
- outputs=[]
903
- )
904
- torch_release.click(
905
- fn=release_torch_resources,
906
- inputs=[],
907
- outputs=[]
908
- )
909
- reinstall_torch.click(
910
- fn=install_torch,
911
- inputs=[],
912
- outputs=[]
913
- )
914
-
915
- reinstall_cuda_toolkit.click(
916
- fn=install_cuda_toolkit,
917
- inputs=[],
918
- outputs=[]
919
- )
920
 
921
  color_display.select(on_color_display_select,inputs=[color_display], outputs=[selected_row])
922
  color_display.input(on_input,inputs=[color_display], outputs=[color_display, gr.State(excluded_color_list)])
@@ -1004,8 +1015,8 @@ def main(debug):
1004
  outputs=[bordered_image_output],
1005
  scroll_to_output=True
1006
  )
1007
- (())
1008
- beeuty.queue(default_concurrency_limit=1,max_size=12,api_open=False)
1009
  beeuty.launch(allowed_paths=["assets","/","./assets","images","./images", "./images/prerendered"], favicon_path="./assets/favicon.ico", max_file_size="10mb")
1010
 
1011
 
@@ -1014,12 +1025,13 @@ if __name__ == "__main__":
1014
  format="[%(levelname)s] %(asctime)s %(message)s", level=logging.INFO
1015
  )
1016
  logging.info("Environment Variables: %s" % os.environ)
1017
- if _get_output(["nvcc", "--version"]) is None:
1018
- logging.info("Installing CUDA toolkit...")
1019
- install_cuda_toolkit()
1020
- else:
1021
- logging.info("Detected CUDA: %s" % _get_output(["nvcc", "--version"]))
1022
-
1023
- logging.info("Installing CUDA extensions...")
1024
- setup_runtime_env()
1025
- main(os.getenv("DEBUG") == "1")
 
 
1
  import os
2
  # Import constants
3
+ import numpy as np
4
+ import torch
5
  import utils.constants as constants
6
  import gradio as gr
7
  from PIL import Image
 
12
  import atexit
13
  import random
14
  import logging
15
+ import accelerate
16
+ from transformers import AutoTokenizer
17
+ import gc
18
 
19
  IS_SHARED_SPACE = constants.IS_SHARED_SPACE
20
 
 
62
  # )
63
  from utils.version_info import (
64
  versions_html,
65
+ #initialize_cuda,
66
+ #release_torch_resources,
67
  get_torch_info
68
  )
69
  from utils.lora_details import (
70
+ upd_prompt_notes,
71
+ split_prompt_precisely,
72
+ approximate_token_count,
73
+ get_trigger_words
74
  )
75
+ from diffusers import FluxPipeline,FluxImg2ImgPipeline,FluxControlPipeline
76
+
77
+ PIPELINE_CLASSES = {
78
+ "FluxPipeline": FluxPipeline,
79
+ "FluxImg2ImgPipeline": FluxImg2ImgPipeline,
80
+ "FluxControlPipeline": FluxControlPipeline
81
+ }
82
+
83
+ import spaces
84
 
85
  input_image_palette = []
86
  current_prerendered_image = gr.State("./images/images/Beeuty-1.png")
 
179
  default_model = model_textbox
180
  return default_model, []
181
 
182
+ @spaces.GPU(progress=gr.Progress(track_tqdm=True))
183
+ def generate_image_lowmem(
184
+ text,
185
+ neg_prompt=None,
186
+ model_name="black-forest-labs/FLUX.1-dev",
187
+ lora_weights=None,
188
+ conditioned_image=None,
189
+ image_width=1368,
190
+ image_height=848,
191
+ guidance_scale=3.5,
192
+ num_inference_steps=30,
193
+ seed=0,
194
+ true_cfg_scale=1.0,
195
+ pipeline_name="FluxPipeline",
196
+ strength=0.75,
197
+ additional_parameters=None,
198
+ progress=gr.Progress(track_tqdm=True)
199
+ ):
200
+ #from torch import cuda, bfloat16, float32, Generator, no_grad, backends
201
+ # Retrieve the pipeline class from the mapping
202
+ pipeline_class = PIPELINE_CLASSES.get(pipeline_name)
203
+ if not pipeline_class:
204
+ raise ValueError(f"Unsupported pipeline type '{pipeline_name}'. "
205
+ f"Available options: {list(PIPELINE_CLASSES.keys())}")
206
+
207
+ #initialize_cuda()
208
+ device = "cuda" if torch.cuda.is_available() else "cpu"
209
+ from src.condition import Condition
210
+
211
+ print(f"device:{device}\nmodel_name:{model_name}\nlora_weights:{lora_weights}\n")
212
+ #print(f"\n {get_torch_info()}\n")
213
+ # Disable gradient calculations
214
+ with torch.no_grad():
215
+ # Initialize the pipeline inside the context manager
216
+ pipe = pipeline_class.from_pretrained(
217
+ model_name,
218
+ torch_dtype=torch.bfloat16 if device == "cuda" else torch.float32
219
+ ).to(device)
220
+ # Optionally, don't use CPU offload if not necessary
221
 
222
+ # alternative version that may be more efficient
223
+ # pipe.enable_sequential_cpu_offload()
224
+ if pipeline_name == "FluxPipeline":
225
+ pipe.enable_model_cpu_offload()
226
+ pipe.vae.enable_slicing()
227
+ pipe.vae.enable_tiling()
228
+ else:
229
+ pipe.enable_model_cpu_offload()
230
+
231
+ # Access the tokenizer from the pipeline
232
+ tokenizer = pipe.tokenizer
233
+
234
+ # Check if add_prefix_space is set and convert to slow tokenizer if necessary
235
+ if getattr(tokenizer, 'add_prefix_space', False):
236
+ tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False, device_map = 'cpu')
237
+ # Update the pipeline's tokenizer
238
+ pipe.tokenizer = tokenizer
239
+ pipe.to(device)
240
+
241
+ flash_attention_enabled = torch.backends.cuda.flash_sdp_enabled()
242
+ if flash_attention_enabled == False:
243
+ #Enable xFormers memory-efficient attention (optional)
244
+ #pipe.enable_xformers_memory_efficient_attention()
245
+ print("\nEnabled xFormers memory-efficient attention.\n")
246
+ else:
247
+ pipe.attn_implementation="flash_attention_2"
248
+ print("\nEnabled flash_attention_2.\n")
249
+
250
+ condition_type = "subject"
251
+ # Load LoRA weights
252
+ # note: does not yet handle multiple LoRA weights with different names, needs .set_adapters(["depth", "hyper-sd"], adapter_weights=[0.85, 0.125])
253
+ if lora_weights:
254
+ for lora_weight in lora_weights:
255
+ lora_configs = constants.LORA_DETAILS.get(lora_weight, [])
256
+ lora_weight_set = False
257
+ if lora_configs:
258
+ for config in lora_configs:
259
+ # Load LoRA weights with optional weight_name and adapter_name
260
+ if 'weight_name' in config:
261
+ weight_name = config.get("weight_name")
262
+ adapter_name = config.get("adapter_name")
263
+ lora_collection = config.get("lora_collection")
264
+ if weight_name and adapter_name and lora_collection and lora_weight_set == False:
265
+ pipe.load_lora_weights(
266
+ lora_collection,
267
+ weight_name=weight_name,
268
+ adapter_name=adapter_name,
269
+ token=constants.HF_API_TOKEN
270
+ )
271
+ lora_weight_set = True
272
+ print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}, lora_collection={lora_collection}\n")
273
+ elif weight_name and adapter_name==None and lora_collection and lora_weight_set == False:
274
+ pipe.load_lora_weights(
275
+ lora_collection,
276
+ weight_name=weight_name,
277
+ token=constants.HF_API_TOKEN
278
+ )
279
+ lora_weight_set = True
280
+ print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}, lora_collection={lora_collection}\n")
281
+ elif weight_name and adapter_name and lora_weight_set == False:
282
+ pipe.load_lora_weights(
283
+ lora_weight,
284
+ weight_name=weight_name,
285
+ adapter_name=adapter_name,
286
+ token=constants.HF_API_TOKEN
287
+ )
288
+ lora_weight_set = True
289
+ print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}\n")
290
+ elif weight_name and adapter_name==None and lora_weight_set == False:
291
+ pipe.load_lora_weights(
292
+ lora_weight,
293
+ weight_name=weight_name,
294
+ token=constants.HF_API_TOKEN
295
+ )
296
+ lora_weight_set = True
297
+ print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}\n")
298
+ elif lora_weight_set == False:
299
+ pipe.load_lora_weights(
300
+ lora_weight,
301
+ token=constants.HF_API_TOKEN
302
+ )
303
+ lora_weight_set = True
304
+ print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}\n")
305
+ # Apply 'pipe' configurations if present
306
+ if 'pipe' in config:
307
+ pipe_config = config['pipe']
308
+ for method_name, params in pipe_config.items():
309
+ method = getattr(pipe, method_name, None)
310
+ if method:
311
+ print(f"Applying pipe method: {method_name} with params: {params}")
312
+ method(**params)
313
  else:
314
+ print(f"Method {method_name} not found in pipe.")
315
+ if 'condition_type' in config:
316
+ condition_type = config['condition_type']
317
+ if condition_type == "coloring":
318
+ #pipe.enable_coloring()
319
+ print("\nEnabled coloring.\n")
320
+ elif condition_type == "deblurring":
321
+ #pipe.enable_deblurring()
322
+ print("\nEnabled deblurring.\n")
323
+ elif condition_type == "fill":
324
+ #pipe.enable_fill()
325
+ print("\nEnabled fill.\n")
326
+ elif condition_type == "depth":
327
+ #pipe.enable_depth()
328
+ print("\nEnabled depth.\n")
329
+ elif condition_type == "canny":
330
+ #pipe.enable_canny()
331
+ print("\nEnabled canny.\n")
332
+ elif condition_type == "subject":
333
+ #pipe.enable_subject()
334
+ print("\nEnabled subject.\n")
335
+ else:
336
+ print(f"Condition type {condition_type} not implemented.")
337
+ else:
338
+ pipe.load_lora_weights(lora_weight, use_auth_token=constants.HF_API_TOKEN)
339
+ # Set the random seed for reproducibility
340
+ generator = torch.Generator(device=device).manual_seed(seed)
341
+ conditions = []
342
+ if conditioned_image is not None:
343
+ conditioned_image = crop_and_resize_image(conditioned_image, image_width, image_height)
344
+ condition = Condition(condition_type, conditioned_image)
345
+ conditions.append(condition)
346
+ print(f"\nAdded conditioned image.\n {conditioned_image.size}")
347
+ # Prepare the parameters for image generation
348
+ additional_parameters ={
349
+ "strength": strength,
350
+ "image": conditioned_image,
351
+ }
352
+ else:
353
+ print("\nNo conditioned image provided.")
354
+ if neg_prompt!=None:
355
+ true_cfg_scale=1.1
356
+ additional_parameters ={
357
+ "negative_prompt": neg_prompt,
358
+ "true_cfg_scale": true_cfg_scale,
359
+ }
360
+ # handle long prompts by splitting them
361
+ if approximate_token_count(text) > 76:
362
+ prompt, prompt2 = split_prompt_precisely(text)
363
+ prompt_parameters = {
364
+ "prompt" : prompt,
365
+ "prompt_2": prompt2
366
  }
367
+ else:
368
+ prompt_parameters = {
369
+ "prompt" :text
370
+ }
371
+ additional_parameters.update(prompt_parameters)
372
+ # Combine all parameters
373
+ generate_params = {
374
+ "height": image_height,
375
+ "width": image_width,
376
+ "guidance_scale": guidance_scale,
377
+ "num_inference_steps": num_inference_steps,
378
+ "generator": generator, }
379
+ if additional_parameters:
380
+ generate_params.update(additional_parameters)
381
+ generate_params = {k: v for k, v in generate_params.items() if v is not None}
382
+ print(f"generate_params: {generate_params}")
383
+ # Generate the image
384
+ result = pipe(**generate_params)
385
+ image = result.images[0]
386
+ # Clean up
387
+ del result
388
+ del conditions
389
+ del generator
390
+ # Delete the pipeline and clear cache
391
+ del pipe
392
+ torch.cuda.empty_cache()
393
+ torch.cuda.ipc_collect()
394
+ print(torch.cuda.memory_summary(device=None, abbreviated=False))
395
 
396
+ return image
397
+
398
+ def generate_ai_image_local (
399
+ map_option,
400
+ prompt_textbox_value,
401
+ neg_prompt_textbox_value,
402
+ model="black-forest-labs/FLUX.1-dev",
403
+ lora_weights=None,
404
+ conditioned_image=None,
405
+ height=512,
406
+ width=912,
407
+ num_inference_steps=30,
408
+ guidance_scale=3.5,
409
+ seed=777,
410
+ pipeline_name="FluxPipeline",
411
+ strength=0.75,
412
+ progress=gr.Progress(track_tqdm=True)
413
+ ):
414
+ print(f"Generating image with lowmem")
415
+ try:
416
+ if map_option != "Prompt":
417
+ prompt = constants.PROMPTS[map_option]
418
+ negative_prompt = constants.NEGATIVE_PROMPTS.get(map_option, "")
419
+ else:
420
+ prompt = prompt_textbox_value
421
+ negative_prompt = neg_prompt_textbox_value or ""
422
+ #full_prompt = f"{prompt} {negative_prompt}"
423
+ additional_parameters = {}
424
+ if lora_weights:
425
+ for lora_weight in lora_weights:
426
+ lora_configs = constants.LORA_DETAILS.get(lora_weight, [])
427
+ for config in lora_configs:
428
+ if 'parameters' in config:
429
+ additional_parameters.update(config['parameters'])
430
+ elif 'trigger_words' in config:
431
+ trigger_words = get_trigger_words(lora_weight)
432
+ prompt = f"{trigger_words} {prompt}"
433
+ for key, value in additional_parameters.items():
434
+ if key in ['height', 'width', 'num_inference_steps', 'max_sequence_length']:
435
+ additional_parameters[key] = int(value)
436
+ elif key in ['guidance_scale','true_cfg_scale']:
437
+ additional_parameters[key] = float(value)
438
+ height = additional_parameters.pop('height', height)
439
+ width = additional_parameters.pop('width', width)
440
+ num_inference_steps = additional_parameters.pop('num_inference_steps', num_inference_steps)
441
+ guidance_scale = additional_parameters.pop('guidance_scale', guidance_scale)
442
+ print("Generating image with the following parameters:")
443
+ print(f"Model: {model}")
444
+ print(f"LoRA Weights: {lora_weights}")
445
+ print(f"Prompt: {prompt}")
446
+ print(f"Neg Prompt: {negative_prompt}")
447
+ print(f"Height: {height}")
448
+ print(f"Width: {width}")
449
+ print(f"Number of Inference Steps: {num_inference_steps}")
450
+ print(f"Guidance Scale: {guidance_scale}")
451
+ print(f"Seed: {seed}")
452
+ print(f"Additional Parameters: {additional_parameters}")
453
+ print(f"Conditioned Image: {conditioned_image}")
454
+ print(f"Conditioned Image Strength: {strength}")
455
+ print(f"pipeline: {pipeline_name}")
456
+ image = generate_image_lowmem(
457
+ text=prompt,
458
+ model_name=model,
459
+ neg_prompt=negative_prompt,
460
+ lora_weights=lora_weights,
461
+ conditioned_image=conditioned_image,
462
+ image_width=width,
463
+ image_height=height,
464
+ guidance_scale=guidance_scale,
465
+ num_inference_steps=num_inference_steps,
466
+ seed=seed,
467
+ pipeline_name=pipeline_name,
468
+ strength=strength,
469
+ additional_parameters=additional_parameters
470
+ )
471
+ with NamedTemporaryFile(delete=False, suffix=".png") as tmp:
472
+ image.save(tmp.name, format="PNG")
473
+ constants.temp_files.append(tmp.name)
474
+ print(f"Image saved to {tmp.name}")
475
+ #release_torch_resources()
476
  gc.collect()
477
+ return tmp.name
478
+ except Exception as e:
479
+ print(f"Error generating AI image: {e}")
480
+ #release_torch_resources()
481
+ gc.collect()
482
+ return None
483
+
484
+ @spaces.GPU(duration=140,progress=gr.Progress(track_tqdm=True))
485
+ def generate_input_image_click(map_option, prompt_textbox_value, negative_prompt_textbox_value, model_textbox_value, randomize_seed=True, seed=None, use_conditioned_image=False, strength=0.5, image_format="16:9", scale_factor=(8/3), progress=gr.Progress(track_tqdm=True)):
486
+ if randomize_seed:
487
+ seed = random.randint(0, constants.MAX_SEED)
488
+
489
  # Get the model and LoRA weights
490
  model, lora_weights = get_model_and_lora(model_textbox_value)
491
  global current_prerendered_image
 
571
  margin_color = detect_color_format(blank_color)
572
  print(f"Adding border to image with width: {mask_width}, height: {mask_height}, color: {margin_color}")
573
  return shrink_and_paste_on_blank(bordered_image_output, mask_width, mask_height, margin_color)
574
+
575
+ @spaces.GPU()
576
+ def getVersions():
577
+ return versions_html()
578
+ generate_input_image_click.zerogpu = True
579
+ def main(debug=False):
580
  title = "HexaGrid Creator"
581
  #description = "Customizable Hexagon Grid Image Generator"
582
  examples = [["assets//examples//hex_map_p1.png", 32, 1, 0, 0, 0, 0, 0, "#ede9ac44","#12165380", True]]
 
802
  # Gallery from PRE_RENDERED_IMAGES GOES HERE
803
  prerendered_image_gallery = gr.Gallery(label="Image Gallery", show_label=True, value=build_prerendered_images(constants.pre_rendered_maps_paths), elem_id="gallery", elem_classes="solid", type="filepath", columns=[3], rows=[3], preview=False ,object_fit="contain", height="auto", format="png",allow_preview=False)
804
  with gr.Row():
805
+ image_guidance_stength = gr.Slider(label="Image Guidance Strength (prompt <-> image)", minimum=0, maximum=1.0, value=0.5, step=0.01, interactive=True)
806
  with gr.Column():
807
  replace_input_image_button = gr.Button(
808
  "Replace Input Image",
 
899
  ],
900
  inputs=[input_image, filter_color, fill_hex, start_x, start_y, end_x, end_y, x_spacing, y_spacing, hex_size, rotation, border_size, border_color, border_opacity],
901
  elem_id="examples")
 
 
902
  with gr.Row():
903
+ gr.HTML(value=getVersions(), visible=True, elem_id="versions")
904
+ # with gr.Row():
905
+ # reinstall_torch = gr.Button("Reinstall Torch", elem_classes="solid small", variant="secondary")
906
+ # reinstall_cuda_toolkit = gr.Button("Install CUDA Toolkit", elem_classes="solid small", variant="secondary")
907
+ # reinitialize_cuda = gr.Button("Reinitialize CUDA", elem_classes="solid small", variant="secondary")
908
+ # torch_release = gr.Button("Release Torch Resources", elem_classes="solid small", variant="secondary")
909
+
910
+ # reinitialize_cuda.click(
911
+ # fn=initialize_cuda,
912
+ # inputs=[],
913
+ # outputs=[]
914
+ # )
915
+ # torch_release.click(
916
+ # fn=release_torch_resources,
917
+ # inputs=[],
918
+ # outputs=[]
919
+ # )
920
+ # reinstall_torch.click(
921
+ # fn=install_torch,
922
+ # inputs=[],
923
+ # outputs=[]
924
+ # )
925
+
926
+ # reinstall_cuda_toolkit.click(
927
+ # fn=install_cuda_toolkit,
928
+ # inputs=[],
929
+ # outputs=[]
930
+ # )
931
 
932
  color_display.select(on_color_display_select,inputs=[color_display], outputs=[selected_row])
933
  color_display.input(on_input,inputs=[color_display], outputs=[color_display, gr.State(excluded_color_list)])
 
1015
  outputs=[bordered_image_output],
1016
  scroll_to_output=True
1017
  )
1018
+
1019
+ beeuty.queue(default_concurrency_limit=2,max_size=12,api_open=False)
1020
  beeuty.launch(allowed_paths=["assets","/","./assets","images","./images", "./images/prerendered"], favicon_path="./assets/favicon.ico", max_file_size="10mb")
1021
 
1022
 
 
1025
  format="[%(levelname)s] %(asctime)s %(message)s", level=logging.INFO
1026
  )
1027
  logging.info("Environment Variables: %s" % os.environ)
1028
+ # if _get_output(["nvcc", "--version"]) is None:
1029
+ # logging.info("Installing CUDA toolkit...")
1030
+ # install_cuda_toolkit()
1031
+ # else:
1032
+ # logging.info("Detected CUDA: %s" % _get_output(["nvcc", "--version"]))
1033
+
1034
+ # logging.info("Installing CUDA extensions...")
1035
+ # setup_runtime_env()
1036
+ #main(os.getenv("DEBUG") == "1")
1037
+ main()
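
The refactored app.py moves the pipeline-class lookup to module scope and decorates the generation functions for ZeroGPU. A condensed, standalone sketch of that selection pattern is below; load_flux_pipeline is an illustrative helper (not a function in app.py), with the model default and offload setting copied from the code above:

import torch
from diffusers import FluxPipeline, FluxImg2ImgPipeline, FluxControlPipeline

PIPELINE_CLASSES = {
    "FluxPipeline": FluxPipeline,
    "FluxImg2ImgPipeline": FluxImg2ImgPipeline,
    "FluxControlPipeline": FluxControlPipeline,
}

def load_flux_pipeline(pipeline_name: str, model_name: str = "black-forest-labs/FLUX.1-dev"):
    """Resolve a pipeline class by name and load it with the app's low-memory settings."""
    pipeline_class = PIPELINE_CLASSES.get(pipeline_name)
    if not pipeline_class:
        raise ValueError(f"Unsupported pipeline type '{pipeline_name}'. "
                         f"Available options: {list(PIPELINE_CLASSES.keys())}")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.bfloat16 if device == "cuda" else torch.float32
    pipe = pipeline_class.from_pretrained(model_name, torch_dtype=dtype)
    pipe.enable_model_cpu_offload()  # offload idle submodules to CPU, as generate_image_lowmem does
    return pipe
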
utils/ai_generator_diffusers_flux.py CHANGED
@@ -5,7 +5,7 @@ import utils.constants as constants
5
  import gradio as gr
6
  from torch import __version__ as torch_version_, version, cuda, bfloat16, float32, Generator, no_grad, backends
7
  from diffusers import FluxPipeline,FluxImg2ImgPipeline,FluxControlPipeline
8
- #import accelerate
9
  from transformers import AutoTokenizer
10
  import safetensors
11
  #import xformers
@@ -19,9 +19,9 @@ from utils.image_utils import (
19
  )
20
  from utils.version_info import (
21
  get_torch_info,
22
- get_diffusers_version,
23
- get_transformers_version,
24
- get_xformers_version,
25
  initialize_cuda,
26
  release_torch_resources
27
  )
 
5
  import gradio as gr
6
  from torch import __version__ as torch_version_, version, cuda, bfloat16, float32, Generator, no_grad, backends
7
  from diffusers import FluxPipeline,FluxImg2ImgPipeline,FluxControlPipeline
8
+ import accelerate
9
  from transformers import AutoTokenizer
10
  import safetensors
11
  #import xformers
 
19
  )
20
  from utils.version_info import (
21
  get_torch_info,
22
+ # get_diffusers_version,
23
+ # get_transformers_version,
24
+ # get_xformers_version,
25
  initialize_cuda,
26
  release_torch_resources
27
  )
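
Note that accelerate is now a hard top-level import here (it was previously commented out); it is needed for the CPU-offload calls the pipeline setup makes. If the dependency ever had to stay optional, the usual guarded-import pattern would look like this — purely a sketch, not part of this commit:

try:
    import accelerate  # used by diffusers when enable_model_cpu_offload() is called
    ACCELERATE_AVAILABLE = True
except ImportError:
    accelerate = None
    ACCELERATE_AVAILABLE = False
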
utils/constants.py CHANGED
@@ -40,6 +40,7 @@ if not HF_API_TOKEN:
40
 
41
  default_lut_example_img = "./LUT/daisy.jpg"
42
  MAX_SEED = np.iinfo(np.int32).max
 
43
 
44
  PROMPTS = {
45
  "BorderBlack": "Top-down view of a hexagon-based alien map with black borders. Features rivers, mountains, volcanoes, and snow at top and bottom. Colors: light blue, green, tan, brown. No reflections or shadows. Partial hexes on edges are black.",
 
40
 
41
  default_lut_example_img = "./LUT/daisy.jpg"
42
  MAX_SEED = np.iinfo(np.int32).max
43
+ TARGET_SIZE = (2688,1536)
44
 
45
  PROMPTS = {
46
  "BorderBlack": "Top-down view of a hexagon-based alien map with black borders. Features rivers, mountains, volcanoes, and snow at top and bottom. Colors: light blue, green, tan, brown. No reflections or shadows. Partial hexes on edges are black.",
utils/version_info.py CHANGED
@@ -106,7 +106,7 @@ def versions_html():
106
  &#x2000;•&#x2000;
107
  transformers: {get_transformers_version()}
108
  &#x2000;•&#x2000;
109
- xformers: {get_xformers_version()}
110
  &#x2000;•&#x2000;
111
  gradio: {gr.__version__}
112
  &#x2000;•&#x2000;
 
106
  &#x2000;•&#x2000;
107
  transformers: {get_transformers_version()}
108
  &#x2000;•&#x2000;
109
+ safetensors: {get_safetensors_version()}
110
  &#x2000;•&#x2000;
111
  gradio: {gr.__version__}
112
  &#x2000;•&#x2000;
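
The new versions line calls get_safetensors_version(), which is referenced but not defined anywhere in this diff. Assuming the helper simply reads the package's own version attribute, a minimal implementation could look like the following sketch (not the file's actual code):

def get_safetensors_version() -> str:
    # safetensors exposes its version string as safetensors.__version__
    try:
        import safetensors
        return safetensors.__version__
    except ImportError:
        return "not installed"
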
web-ui.bat CHANGED
@@ -1,2 +1,5 @@
1
- py -m app
2
  pause
 
1
+ set NVIDIA_VISIBLE_DEVICES=0
2
+ set CUDA_VISIBLE_DEVICES=0
3
+ set CUDA_MODULE_LOADING=LAZY
4
+ python -m app
5
  pause
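
The updated launcher pins work to the first GPU and enables lazy CUDA module loading before starting the app. A rough Python equivalent of that environment setup, shown only as a sketch (values copied from web-ui.bat; whether app.py depends on them is not stated in the diff):

import os

# Mirror web-ui.bat: restrict CUDA to the first device and defer kernel loading.
os.environ.setdefault("NVIDIA_VISIBLE_DEVICES", "0")
os.environ.setdefault("CUDA_VISIBLE_DEVICES", "0")
os.environ.setdefault("CUDA_MODULE_LOADING", "LAZY")

# These must be set before torch initializes CUDA for them to take effect.
import torch
print(torch.cuda.device_count())  # expected to report a single visible device
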