Surn committed
Commit ab4cf94 · 1 Parent(s): cc795e5

Change Torch references

README.md CHANGED
@@ -5,7 +5,7 @@ colorFrom: yellow
  colorTo: purple
  sdk: gradio
  python_version: 3.10.13
- sdk_version: 5.15.0
+ sdk_version: 5.16.0
  app_file: app.py
  pinned: false
  short_description: Transform Your Images into Mesmerizing Hexagon Grids
app.py CHANGED
@@ -1,14 +1,15 @@
- import spaces
  import gradio as gr
  from PIL import Image
  from haishoku.haishoku import Haishoku
- #import os
  from tempfile import NamedTemporaryFile
  #from pathlib import Path
  import atexit
  import random
- # Import constants
- import utils.constants as constants

  IS_SHARED_SPACE = constants.IS_SHARED_SPACE

@@ -20,7 +21,7 @@ from utils.color_utils import (
  detect_color_format,
  update_color_opacity,
  )
- from utils.misc import (get_filename, pause, convert_ratio_to_dimensions)
  from utils.depth_estimation import estimate_depth, create_3d_model, generate_depth_and_3d, generate_depth_button_click

  from utils.image_utils import (
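
In the new version of this hunk (see the new-file listing further down), the utils.misc import widens to also pull in runtime-environment helpers: install_cuda_toolkit, install_torch, _get_output, and setup_runtime_env, matching the commit's focus on Torch references. Their bodies are not part of this diff; the following is a hypothetical sketch of what such helpers often look like on a Gradio Space, assuming pip-based installs and an nvcc probe. Only the names come from the import; every body here is guessed.

```python
# Hypothetical stand-ins for the helpers imported from utils.misc; the real
# implementations live in utils/misc.py, which this diff does not show.
import subprocess
import sys

def _get_output(cmd: list[str]) -> str:
    """Run a command and return its stdout, or '' if it is missing or fails."""
    try:
        return subprocess.run(cmd, capture_output=True, text=True, check=True).stdout
    except (subprocess.CalledProcessError, FileNotFoundError):
        return ""

def install_torch(spec: str = "torch") -> None:
    """Install (or switch) the torch build at runtime via pip."""
    subprocess.run([sys.executable, "-m", "pip", "install", spec], check=True)

def setup_runtime_env() -> str:
    """Report the CUDA compiler version so callers can pick a matching wheel."""
    return _get_output(["nvcc", "--version"])
```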
@@ -33,7 +34,8 @@ from utils.image_utils import (
  show_lut,
  apply_lut_to_image_path,
  multiply_and_blend_images,
- alpha_composite_with_control
  )

  from utils.hex_grid import (
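
The new side of this hunk adds crop_and_resize_image to the utils.image_utils import; the added generation code later calls it to fit a conditioning image to the pipeline's width and height. Its body is not shown in this diff. A hypothetical PIL equivalent, assuming the usual center-crop-to-aspect-then-resize behavior:

```python
# Hypothetical sketch of crop_and_resize_image (imported from utils.image_utils
# but defined elsewhere): center-crop to the target aspect ratio, then resize.
from PIL import Image

def crop_and_resize_image(img: Image.Image, width: int, height: int) -> Image.Image:
    target_ratio = width / height
    src_w, src_h = img.size
    if src_w / src_h > target_ratio:
        # Source is too wide: crop the sides.
        new_w = int(src_h * target_ratio)
        left = (src_w - new_w) // 2
        box = (left, 0, left + new_w, src_h)
    else:
        # Source is too tall: crop top and bottom.
        new_h = int(src_w / target_ratio)
        top = (src_h - new_h) // 2
        box = (0, top, src_w, top + new_h)
    return img.crop(box).resize((width, height), Image.LANCZOS)
```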
@@ -50,11 +52,13 @@ from utils.excluded_colors import (
  on_color_display_select
  )

- from utils.ai_generator import (
- generate_ai_image,
- )
  from utils.version_info import (
  versions_html,
  get_torch_info
  )
  from utils.lora_details import (
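
On the new side, the utils.ai_generator import is commented out and utils.version_info gains initialize_cuda and release_torch_resources alongside get_torch_info. Those two helpers are referenced by the added code but defined outside this diff; a minimal sketch of the conventional pattern they likely wrap, assuming standard torch APIs (the bodies here are not the repository's confirmed implementations):

```python
# Hypothetical sketches of initialize_cuda and release_torch_resources.
import gc

def initialize_cuda() -> str:
    """Pick the compute device the pipelines should run on."""
    import torch
    return "cuda" if torch.cuda.is_available() else "cpu"

def release_torch_resources() -> None:
    """Free Python garbage, then flush the CUDA caching allocator."""
    import torch
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
```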
@@ -138,8 +142,7 @@ def hex_create(hex_size, border_size, input_image_path, start_x, start_y, end_x,
  add_hex_text_option,
  custom_text_list,
  custom_text_color_list
- )
-
  return grid_image

  def get_model_and_lora(model_textbox):
@@ -159,7 +162,324 @@ def get_model_and_lora(model_textbox):
  default_model = model_textbox
  return default_model, []

- def generate_input_image_click(map_option, prompt_textbox_value, negative_prompt_textbox_value, model_textbox_value, seed=None, use_conditioned_image=False, strength=0.5, image_format="16:9", scale_factor=3, progress=gr.Progress(track_tqdm=True)):
  # Get the model and LoRA weights
  model, lora_weights = get_model_and_lora(model_textbox_value)
  global current_prerendered_image
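
The replacement handler (its full 300-plus-line body appears in the new-file listing below) changes the signature to accept randomize_seed and defers the heavy imports (spaces, diffusers, transformers) until the click actually fires, so the Space can boot without loading torch. A stripped-down sketch of that lazy-import-plus-seed pattern; MAX_SEED and the handler name here are stand-ins for the app's own constants and function:

```python
# Minimal sketch of the deferred-import handler pattern used by the new code.
import random

MAX_SEED = 2**32 - 1  # stand-in for constants.MAX_SEED

def on_generate_click(prompt: str, randomize_seed: bool = True, seed: int | None = None) -> int:
    from diffusers import FluxPipeline  # heavy import happens only on click
    if randomize_seed or seed is None:
        seed = random.randint(0, MAX_SEED)
    print(f"Would run {FluxPipeline.__name__} with seed {seed} for: {prompt!r}")
    return seed
```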
@@ -168,7 +488,7 @@ def generate_input_image_click(map_option, prompt_textbox_value, negative_prompt
  if use_conditioned_image:
  print(f"Conditioned path: {current_prerendered_image.value}.. converting to RGB\n")
  # ensure the conditioned image is an image and not a string, cannot use RGBA
- if isinstance(current_prerendered_image.value, str):
  conditioned_image = open_image(current_prerendered_image.value).convert("RGB")
  print(f"Conditioned Image: {conditioned_image.size}.. converted to RGB\n")

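The isinstance guard above only converts when the stored gallery value is a path string; a PIL image would pass through unconverted. A compact sketch of the normalization this guard implies, with PIL's Image.open standing in for the repository's open_image loader:

```python
# Hypothetical normalization of a gallery value that may be a path or an image.
from PIL import Image

def to_rgb_image(value) -> Image.Image:
    if isinstance(value, str):
        return Image.open(value).convert("RGB")  # value is a path on disk
    return value.convert("RGB")                  # value is already a PIL image
```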
@@ -176,20 +496,23 @@ def generate_input_image_click(map_option, prompt_textbox_value, negative_prompt
  width_ratio, height_ratio = map(int, image_format.split(":"))
  aspect_ratio = width_ratio / height_ratio

- width, height = convert_ratio_to_dimensions(aspect_ratio, 512)
-
  # Generate the AI image and get the image path
- image_path = generate_ai_image(
  map_option,
  prompt_textbox_value,
  negative_prompt_textbox_value,
  model,
  lora_weights,
  conditioned_image,
- stength=strength,
  height=height,
  width=width,
- seed=seed
  )

  # Open the generated image
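
The new side of this hunk raises the base edge passed to convert_ratio_to_dimensions from 512 to 576, fixes the old `stength=` typo, and picks FluxImg2ImgPipeline when a conditioning image exists (FluxPipeline otherwise). convert_ratio_to_dimensions itself is imported from utils.misc and not shown here; a hypothetical version consistent with the call site, assuming a fixed short edge rounded to a VAE-friendly multiple:

```python
# Hypothetical sketch of convert_ratio_to_dimensions(aspect_ratio, base).
def convert_ratio_to_dimensions(aspect_ratio: float, base: int = 576, multiple: int = 16) -> tuple[int, int]:
    """Derive (width, height) from an aspect ratio with a fixed short edge."""
    if aspect_ratio >= 1:
        height = base
        width = int(round(base * aspect_ratio / multiple) * multiple)
    else:
        width = base
        height = int(round(base / aspect_ratio / multiple) * multiple)
    return width, height

# Example: a 16:9 request with base 576 yields (1024, 576).
assert convert_ratio_to_dimensions(16 / 9) == (1024, 576)
```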
@@ -207,7 +530,7 @@ def generate_input_image_click(map_option, prompt_textbox_value, negative_prompt
  upscaled_image.save(tmp_upscaled.name, format="PNG")
  constants.temp_files.append(tmp_upscaled.name)
  print(f"Upscaled image saved to {tmp_upscaled.name}")
-
  # Return the path of the upscaled image
  return tmp_upscaled.name

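The handler writes results to a NamedTemporaryFile(delete=False) and records the path in constants.temp_files; the atexit import at the top of app.py suggests those paths are swept at shutdown. A self-contained sketch of that temp-file lifecycle (the sweep hook here is illustrative; the app's actual cleanup registration is not shown in this diff):

```python
# Stdlib-only sketch of the save-then-track temp-file pattern used above.
import atexit
import os
from tempfile import NamedTemporaryFile

temp_files: list[str] = []  # stand-in for constants.temp_files

def save_temp_png(data: bytes) -> str:
    with NamedTemporaryFile(delete=False, suffix=".png") as tmp:
        tmp.write(data)
        temp_files.append(tmp.name)
    return tmp.name

@atexit.register
def _cleanup() -> None:
    for path in temp_files:
        try:
            os.remove(path)
        except OSError:
            pass  # already gone or locked; nothing more to do at exit
```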
@@ -236,423 +559,467 @@ def combine_images_with_lerp(input_image, output_image, alpha):
  print(f"Combining images with alpha: {alpha}")
  return lerp_imagemath(in_image, out_image, alpha)

- def add_border(image, mask_width, mask_height, blank_color):
  bordered_image_output = Image.open(image).convert("RGBA")
  margin_color = detect_color_format(blank_color)
  print(f"Adding border to image with width: {mask_width}, height: {mask_height}, color: {margin_color}")
  return shrink_and_paste_on_blank(bordered_image_output, mask_width, mask_height, margin_color)

- title = "HexaGrid Creator"
- description = "Customizable Hexagon Grid Image Generator"
- examples = [["assets//examples//hex_map_p1.png", 32, 1, 0, 0, 0, 0, 0, "#ede9ac44","#12165380", True]]

- gr.set_static_paths(paths=["images/","images/images","images/prerendered","LUT/","fonts/"])
- # Gradio Blocks Interface
- with gr.Blocks(css_paths="style_20250128.css", title="HexaGrid Creator", theme='Surn/beeuty') as beeuty:
- with gr.Row():
- gr.Markdown("""
- # HexaGrid Creator
- ## Transform Your Images into Mesmerizing Hexagon Grid Masterpieces! ⬢""", elem_classes="intro")
- with gr.Row():
- with gr.Accordion("Welcome to HexaGrid Creator, the ultimate tool for transforming your images into stunning hexagon grid artworks. Whether you're a tabletop game enthusiast, a digital artist, or someone who loves unique patterns, HexaGrid Creator has something for you.", open=False, elem_classes="intro"):
- gr.Markdown ("""

- ## Drop an image into the Input Image and get started!

- ## What is HexaGrid Creator?
- HexaGrid Creator is a web-based application that allows you to apply a hexagon grid overlay to any image. You can customize the size, color, and opacity of the hexagons, as well as the background and border colors. The result is a visually striking image that looks like it was made from hexagonal tiles!
-
- ### What Can You Do?
- - **Generate Hexagon Grids:** Create beautiful hexagon grid overlays on any image with fully customizable parameters.
- - **AI-Powered Image Generation:** Use advanced AI models to generate images based on your prompts and apply hexagon grids to them.
- - **Color Exclusion:** Select and exclude specific colors from your hexagon grid for a cleaner and more refined look.
- - **Interactive Customization:** Adjust hexagon size, border size, rotation, background color, and more in real-time.
- - **Depth and 3D Model Generation:** Generate depth maps and 3D models from your images for enhanced visualization.
- - **Image Filter [Look-Up Table (LUT)] Application:** Apply filters (LUTs) to your images for color grading and enhancement.
- - **Pre-rendered Maps:** Access a library of pre-rendered hexagon maps for quick and easy customization.
- - **Add Margins:** Add customizable margins around your images for a polished finish.
-
- ### Why You'll Love It
- - **Fun and Easy to Use:** With an intuitive interface and real-time previews, creating hexagon grids has never been this fun!
- - **Endless Creativity:** Unleash your creativity with endless customization options and see your images transform in unique ways.
- - **Hexagon-Inspired Theme:** Enjoy a delightful yellow and purple theme inspired by hexagons! ⬢
- - **Advanced AI Models:** Leverage advanced AI models and LoRA weights for high-quality image generation and customization.
-
- ### Get Started
- 1. **Upload or Generate an Image:** Start by uploading your own image or generate one using our AI-powered tool.
- 2. **Customize Your Grid:** Play around with the settings to create the perfect hexagon grid overlay.
- 3. **Download and Share:** Once you're happy with your creation, download it and share it with the world!
-
- ### Advanced Features
- - **Generative AI Integration:** Utilize models like `black-forest-labs/FLUX.1-dev` and various LoRA weights for generating unique images.
- - **Pre-rendered Maps:** Access a library of pre-rendered hexagon maps for quick and easy customization.
- - **Image Filter [Look-Up Table (LUT)] Application:** Apply filters (LUTs) to your images for color grading and enhancement.
- - **Depth and 3D Model Generation:** Create depth maps and 3D models from your images for enhanced visualization.
- - **Add Margins:** Customize margins around your images for a polished finish.
-
- Join the hive and start creating with HexaGrid Creator today!
-
- """, elem_classes="intro")
- with gr.Row():
- from utils.image_utils import convert_to_rgba_png
-
- # Existing code
- with gr.Column(scale=2):
- input_image = gr.Image(
- label="Input Image",
- type="filepath",
- interactive=True,
- elem_classes="centered solid imgcontainer",
- key="imgInput",
- image_mode=None,
- format="PNG",
- show_download_button=True,
- )

- # New code to convert input image to RGBA PNG
- def on_input_image_change(image_path):
- if image_path is None:
- gr.Warning("Please upload an Input Image to get started.")
- return None
- img, img_path = convert_to_rgba_png(image_path)
- return img_path
-
- input_image.change(
- fn=on_input_image_change,
- inputs=[input_image],
- outputs=[input_image], scroll_to_output=True,
- )
- with gr.Column():
- with gr.Accordion("Hex Coloring and Exclusion", open = False):
- with gr.Row():
- with gr.Column():
- color_picker = gr.ColorPicker(label="Pick a color to exclude",value="#505050")
- with gr.Column():
- filter_color = gr.Checkbox(label="Filter Excluded Colors from Sampling", value=False,)
- exclude_color_button = gr.Button("Exclude Color", elem_id="exlude_color_button", elem_classes="solid")
- color_display = gr.DataFrame(label="List of Excluded RGBA Colors", headers=["R", "G", "B", "A"], elem_id="excluded_colors", type="array", value=build_dataframe(excluded_color_list), interactive=True, elem_classes="solid centered")
- selected_row = gr.Number(0, label="Selected Row", visible=False)
- delete_button = gr.Button("Delete Row", elem_id="delete_exclusion_button", elem_classes="solid")
- fill_hex = gr.Checkbox(label="Fill Hex with color from Image", value=True)
- with gr.Accordion("Image Filters", open = False):
- with gr.Row():
- with gr.Column():
- composite_color = gr.ColorPicker(label="Color", value="#ede9ac44")
- with gr.Column():
- composite_opacity = gr.Slider(label="Opacity %", minimum=0, maximum=100, value=50, interactive=True)
- with gr.Row():
- composite_button = gr.Button("Composite", elem_classes="solid")
- with gr.Row():
- with gr.Column():
- lut_filename = gr.Textbox(
- value="",
- label="Look Up Table (LUT) File Name",
- elem_id="lutFileName")
- with gr.Column():
- lut_file = gr.File(
- value=None,
- file_count="single",
- file_types=[".cube"],
- type="filepath",
- label="LUT cube File")
- with gr.Row():
- lut_example_image = gr.Image(type="pil", label="Filter (LUT) Example Image", value=constants.default_lut_example_img)
- with gr.Row():
- with gr.Column():
- gr.Markdown("""
- ### Included Filters (LUTs)
- There are several included Filters:

- Try them on the example image before applying to your Input Image.
- """, elem_id="lut_markdown")
- with gr.Column():
- gr.Examples(elem_id="lut_examples",
- examples=[[f] for f in constants.lut_files],
- inputs=[lut_filename],
- outputs=[lut_filename],
- label="Select a Filter (LUT) file. Populate the LUT File Name field"
- )
-
- with gr.Row():
- apply_lut_button = gr.Button("Apply Filter (LUT)", elem_classes="solid", elem_id="apply_lut_button")
-
- lut_file.change(get_filename, inputs=[lut_file], outputs=[lut_filename])
- lut_filename.change(show_lut, inputs=[lut_filename, lut_example_image], outputs=[lut_example_image])
- apply_lut_button.click(
- lambda lut_filename, input_image: gr.Warning("Please upload an Input Image to get started.") if input_image is None else apply_lut_to_image_path(lut_filename, input_image)[0],
- inputs=[lut_filename, input_image],
- outputs=[input_image],
- scroll_to_output=True
  )

- with gr.Row():
- with gr.Accordion("Generative AI", open = False):
- with gr.Row():
- with gr.Column():
- model_options = gr.Dropdown(
- label="Model Options",
- choices=constants.MODELS + constants.LORA_WEIGHTS + ["Manual Entry"],
- value="Cossale/Frames2-Flex.1",
- elem_classes="solid"
- )
- model_textbox = gr.Textbox(
- label="LORA/Model",
- value="Cossale/Frames2-Flex.1",
- elem_classes="solid",
- elem_id="inference_model",
- visible=False
- )
- # Update map_options to a Dropdown with choices from constants.PROMPTS keys
  with gr.Row():
  with gr.Column():
- map_options = gr.Dropdown(
- label="Map Options",
- choices=list(constants.PROMPTS.keys()),
- value="Alien Landscape",
- elem_classes="solid",
- scale=0
- )
  with gr.Column():
- # Add Dropdown for sizing of Images, height and width based on selection. Options are 16x9, 16x10, 4x5, 1x1
- # The values of height and width are based on common resolutions for each aspect ratio
- # Default to 16x9, 912x512
- image_size_ratio = gr.Dropdown(label="Image Size", choices=["16:9", "16:10", "4:5", "4:3", "2:1","3:2","1:1", "9:16", "10:16", "5:4", "3:4","1:2", "2:3"], value="16:9", elem_classes="solid", type="value", scale=0, interactive=True)
  with gr.Column():
- seed = gr.Slider(
- label="Seed",
- minimum=0,
- maximum=constants.MAX_SEED,
- step=1,
- value=0,
- scale=0
  )
- randomize_seed = gr.Checkbox(label="Randomize seed", value=True, scale=0, interactive=True)
- prompt_textbox = gr.Textbox(
- label="Prompt",
- visible=False,
- elem_classes="solid",
- value="top-down, (tabletop_map built from small hexagon pieces) hexagon map of a Battletech_boardgame forest with lakes, forest, magic fauna, and snow at the top and bottom, (middle is dark, no_reflections, no_shadows) , tall and short hexagon tiles. Viewed from above.",
- lines=4
- )
- negative_prompt_textbox = gr.Textbox(
- label="Negative Prompt",
- visible=False,
- elem_classes="solid",
- value="low quality, bad anatomy, blurry, cropped, worst quality, shadows, people, humans, reflections, shadows, realistic map of the Earth, isometric, text"
- )
- prompt_notes_label = gr.Label(
- "You should use FRM$ as trigger words. @1.5 minutes",
- elem_classes="solid centered small",
- show_label=False,
- visible=False
- )
- # Keep the change event to maintain functionality
- map_options.change(
- fn=update_prompt_visibility,
- inputs=[map_options],
- outputs=[prompt_textbox, negative_prompt_textbox, prompt_notes_label]
- )
  with gr.Row():
- generate_input_image = gr.Button(
- "Generate AI Image",
- elem_id="generate_input_image",
- elem_classes="solid"
  )
- with gr.Column(scale=2):
- with gr.Accordion("Template Image Styles", open = False):
- with gr.Row():
- # Gallery from PRE_RENDERED_IMAGES GOES HERE
- prerendered_image_gallery = gr.Gallery(label="Image Gallery", show_label=True, value=build_prerendered_images(constants.pre_rendered_maps_paths), elem_id="gallery", elem_classes="solid", type="filepath", columns=[3], rows=[3], preview=False ,object_fit="contain", height="auto",file_types=["image"], format="png",allow_preview=False)
- with gr.Row():
- image_guidance_stength = gr.Slider(label="Image Guidance Strength", minimum=0, maximum=1.0, value=0.5, step=0.05, interactive=True)
- with gr.Column():
- replace_input_image_button = gr.Button(
- "Replace Input Image",
- elem_id="prerendered_replace_input_image_button",
- elem_classes="solid"
- )
- with gr.Column():
- generate_input_image_from_gallery = gr.Button(
- "Generate AI Image from Gallery",
- elem_id="generate_input_image_from_gallery",
- elem_classes="solid"
- )
-
- with gr.Accordion("Advanced Hexagon Settings", open = False):
- with gr.Row():
- start_x = gr.Number(label="Start X", value=0, minimum=-512, maximum= 512, precision=0)
- start_y = gr.Number(label="Start Y", value=0, minimum=-512, maximum= 512, precision=0)
- end_x = gr.Number(label="End X", value=0, minimum=-512, maximum= 512, precision=0)
- end_y = gr.Number(label="End Y", value=0, minimum=-512, maximum= 512, precision=0)
- with gr.Row():
- x_spacing = gr.Number(label="Adjust Horizontal spacing", value=-1, minimum=-200, maximum=200, precision=1)
- y_spacing = gr.Number(label="Adjust Vertical spacing", value=1, minimum=-200, maximum=200, precision=1)
- with gr.Row():
- rotation = gr.Slider(-90, 180, 0.0, 0.1, label="Hexagon Rotation (degree)")
- add_hex_text = gr.Dropdown(label="Add Text to Hexagons", choices=[None, "Row-Column Coordinates", "Sequential Numbers", "Playing Cards Sequential", "Playing Cards Alternate Red and Black", "Custom List"], value=None)
- with gr.Row():
- custom_text_list = gr.TextArea(label="Custom Text List", value=constants.cards_alternating, visible=False,)
- custom_text_color_list = gr.TextArea(label="Custom Text Color List", value=constants.card_colors_alternating, visible=False)
- with gr.Row():
- hex_text_info = gr.Markdown("""
- ### Text Color uses the Border Color and Border Opacity, unless you use a custom list.
- ### The Custom Text List and Custom Text Color List are comma separated lists.
- ### The custom color list is a comma separated list of hex colors.
- #### Example: "A,2,3,4,5,6,7,8,9,10,J,Q,K", "red,#0000FF,#00FF00,red,#FFFF00,#00FFFF,#FF8000,#FF00FF,#FF0080,#FF8000,#FF0080,lightblue"
- """, elem_id="hex_text_info", visible=False)
- add_hex_text.change(
- fn=lambda x: (
- gr.update(visible=(x == "Custom List")),
- gr.update(visible=(x == "Custom List")),
- gr.update(visible=(x != None))
- ),
- inputs=add_hex_text,
- outputs=[custom_text_list, custom_text_color_list, hex_text_info]
- )
- with gr.Row():
- hex_size = gr.Number(label="Hexagon Size", value=32, minimum=1, maximum=768)
- border_size = gr.Slider(-5,25,value=0,step=1,label="Border Size")
- with gr.Row():
- background_color = gr.ColorPicker(label="Background Color", value="#000000", interactive=True)
- background_opacity = gr.Slider(0,100,0,1,label="Background Opacity %")
- border_color = gr.ColorPicker(label="Border Color", value="#7b7b7b", interactive=True)
- border_opacity = gr.Slider(0,100,0,1,label="Border Opacity %")
- with gr.Row():
- hex_button = gr.Button("Generate Hex Grid!", elem_classes="solid", elem_id="btn-generate")
- with gr.Row():
- output_image = gr.Image(label="Hexagon Grid Image", image_mode = "RGBA", show_download_button=True, show_share_button=True,elem_classes="centered solid imgcontainer", format="PNG", type="filepath", key="ImgOutput")
- overlay_image = gr.Image(label="Hexagon Overlay Image", image_mode = "RGBA", show_share_button=True, elem_classes="centered solid imgcontainer", format="PNG", type="filepath", key="ImgOverlay")
- with gr.Row():
- output_overlay_composite = gr.Slider(0,100,50,0.5, label="Interpolate Intensity")
- output_blend_multiply_composite = gr.Slider(0,100,50,0.5, label="Overlay Intensity")
- output_alpha_composite = gr.Slider(0,100,50,0.5, label="Alpha Composite Intensity")
- with gr.Accordion("Add Margins (bleed)", open=False):
  with gr.Row():
- border_image_source = gr.Radio(label="Add Margins around which Image", choices=["Input Image", "Overlay Image"], value="Overlay Image")
  with gr.Row():
- mask_width = gr.Number(label="Margins Width", value=10, minimum=0, maximum=100, precision=0)
- mask_height = gr.Number(label="Margins Height", value=10, minimum=0, maximum=100, precision=0)
  with gr.Row():
- margin_color = gr.ColorPicker(label="Margin Color", value="#333333FF", interactive=True)
- margin_opacity = gr.Slider(0,100,95,0.5,label="Margin Opacity %")
  with gr.Row():
- add_border_button = gr.Button("Add Margins", elem_classes="solid", variant="secondary")
  with gr.Row():
- bordered_image_output = gr.Image(label="Image with Margins", image_mode="RGBA", show_download_button=True, show_share_button=True, elem_classes="centered solid imgcontainer", format="PNG", type="filepath", key="ImgBordered")

- with gr.Accordion("Height Maps and 3D", open = False):
- with gr.Row():
- with gr.Column():
- voxel_size_factor = gr.Slider(label="Voxel Size Factor", value=1.00, minimum=0.01, maximum=40.00, step=0.01)
- with gr.Column():
- depth_image_source = gr.Radio(label="Depth Image Source", choices=["Input Image", "Output Image", "Overlay Image","Image with Margins"], value="Input Image")
  with gr.Row():
- generate_depth_button = gr.Button("Generate Depth Map and 3D Model From Selected Image", elem_classes="solid", variant="secondary")
  with gr.Row():
- depth_map_output = gr.Image(label="Depth Map", image_mode="L", elem_classes="centered solid imgcontainer", format="PNG", type="filepath", key="ImgDepth")
- model_output = gr.Model3D(label="3D Model", clear_color=[1.0, 1.0, 1.0, 0.25], key="Img3D", elem_classes="centered solid imgcontainer")
- with gr.Row():
- gr.Examples(examples=[
- ["assets//examples//hex_map_p1.png", False, True, -32,-31,80,80,-1.8,0,35,0,1,"#FFD0D0", 15],
- ["assets//examples//hex_map_p1_overlayed.png", False, False, -32,-31,80,80,-1.8,0,35,0,1,"#FFD0D0", 75],
- ["assets//examples//hex_flower_logo.png", False, True, -95,-95,100,100,-24,-2,190,30,2,"#FF8951", 50],
- ["assets//examples//hexed_fract_1.png", False, True, 0,0,0,0,0,0,10,0,0,"#000000", 5],
- ["assets//examples//tmpzt3mblvk.png", False, True, -20,10,0,0,-6,-2,35,30,1,"#ffffff", 0],
- ],
- inputs=[input_image, filter_color, fill_hex, start_x, start_y, end_x, end_y, x_spacing, y_spacing, hex_size, rotation, border_size, border_color, border_opacity],
- elem_id="examples")
- with gr.Row():
- gr.HTML(value=versions_html(), visible=True, elem_id="versions")
-
- color_display.select(on_color_display_select,inputs=[color_display], outputs=[selected_row])
- color_display.input(on_input,inputs=[color_display], outputs=[color_display, gr.State(excluded_color_list)])
-
- delete_button.click(fn=delete_color, inputs=[selected_row, color_display], outputs=[color_display])
- exclude_color_button.click(fn=add_color, inputs=[color_picker, gr.State(excluded_color_list)], outputs=[color_display, gr.State(excluded_color_list)])
- hex_button.click(
- fn=lambda hex_size, border_size, input_image, start_x, start_y, end_x, end_y, rotation, background_color, background_opacity, border_color, border_opacity, fill_hex, color_display, filter_color, x_spacing, y_spacing, add_hex_text, custom_text_list, custom_text_color_list:
- gr.Warning("Please upload an Input Image to get started.") if input_image is None else hex_create(hex_size, border_size, input_image, start_x, start_y, end_x, end_y, rotation, background_color, background_opacity, border_color, border_opacity, fill_hex, color_display, filter_color, x_spacing, y_spacing, add_hex_text, custom_text_list, custom_text_color_list),
- inputs=[hex_size, border_size, input_image, start_x, start_y, end_x, end_y, rotation, background_color, background_opacity, border_color, border_opacity, fill_hex, color_display, filter_color, x_spacing, y_spacing, add_hex_text, custom_text_list, custom_text_color_list],
- outputs=[output_image, overlay_image],
- scroll_to_output=True
- )
- generate_input_image.click(
- fn=generate_input_image_click,
- inputs=[map_options, prompt_textbox, negative_prompt_textbox, model_textbox,gr.State( seed if randomize_seed==False else random.randint(0, constants.MAX_SEED)), gr.State(False), gr.State(0.5), image_size_ratio],
- outputs=[input_image], scroll_to_output=True
- )
- generate_depth_button.click(
- fn=generate_depth_button_click,
- inputs=[depth_image_source, voxel_size_factor, input_image, output_image, overlay_image, bordered_image_output],
- outputs=[depth_map_output, model_output], scroll_to_output=True
- )
- model_textbox.change(
- fn=update_prompt_notes,
- inputs=model_textbox,
- outputs=prompt_notes_label,preprocess=False
- )
- model_options.change(
- fn=lambda x: (gr.update(visible=(x == "Manual Entry")), gr.update(value=x) if x != "Manual Entry" else gr.update()),
- inputs=model_options,
- outputs=[model_textbox, model_textbox]
- )
- model_options.change(
- fn=update_prompt_notes,
- inputs=model_options,
- outputs=prompt_notes_label
- )
- composite_button.click(
- fn=lambda input_image, composite_color, composite_opacity: gr.Warning("Please upload an Input Image to get started.") if input_image is None else change_color(input_image, composite_color, composite_opacity),
- inputs=[input_image, composite_color, composite_opacity],
- outputs=[input_image]
- )

- #use conditioned_image as the input_image for generate_input_image_click
- generate_input_image_from_gallery.click(
- fn=generate_input_image_click,
- inputs=[map_options, prompt_textbox, negative_prompt_textbox, model_textbox, gr.State(True), image_guidance_stength, image_size_ratio],
- outputs=[input_image], scroll_to_output=True
- )

- # Update the state variable with the prerendered image filepath when an image is selected
- prerendered_image_gallery.select(
- fn=on_prerendered_gallery_selection,
- inputs=None,
- outputs=[gr.State(current_prerendered_image)], # Update the state with the selected image
- show_api=False
- )
- # replace input image with selected gallery image
- replace_input_image_button.click(
- lambda: current_prerendered_image.value,
- inputs=None,
- outputs=[input_image], scroll_to_output=True
- )
- output_overlay_composite.change(
- fn=combine_images_with_lerp,
- inputs=[input_image, output_image, output_overlay_composite],
- outputs=[overlay_image], scroll_to_output=True
- )
- output_blend_multiply_composite.change(
- fn=multiply_and_blend_images,
- inputs=[input_image, output_image, output_blend_multiply_composite],
- outputs=[overlay_image],
- scroll_to_output=True
- )
- output_alpha_composite.change(
- fn=alpha_composite_with_control,
- inputs=[input_image, output_image, output_alpha_composite],
- outputs=[overlay_image],
- scroll_to_output=True
- )
- add_border_button.click(
- fn=lambda image_source, mask_w, mask_h, color, opacity, input_img, overlay_img: add_border(input_img if image_source == "Input Image" else overlay_img, mask_w, mask_h, update_color_opacity(detect_color_format(color), opacity * 2.55)),
- inputs=[border_image_source, mask_width, mask_height, margin_color, margin_opacity, input_image, overlay_image],
- outputs=[bordered_image_output],
- scroll_to_output=True
- )
- (())
- if __name__ == "__main__":
  beeuty.queue(default_concurrency_limit=1,max_size=12,api_open=False)
- beeuty.launch(allowed_paths=["assets","/","./assets","images","./images", "./images/prerendered"], favicon_path="./assets/favicon.ico", max_file_size="10mb")
 
+ import os
+ # Import constants
+ import utils.constants as constants
  import gradio as gr
  from PIL import Image
  from haishoku.haishoku import Haishoku
+
  from tempfile import NamedTemporaryFile
  #from pathlib import Path
  import atexit
  import random
+ import logging

  IS_SHARED_SPACE = constants.IS_SHARED_SPACE

  detect_color_format,
  update_color_opacity,
  )
+ from utils.misc import (get_filename, pause, convert_ratio_to_dimensions, install_cuda_toolkit,install_torch, _get_output, setup_runtime_env)
  from utils.depth_estimation import estimate_depth, create_3d_model, generate_depth_and_3d, generate_depth_button_click

  from utils.image_utils import (
  show_lut,
  apply_lut_to_image_path,
  multiply_and_blend_images,
+ alpha_composite_with_control,
+ crop_and_resize_image
  )

  from utils.hex_grid import (
  on_color_display_select
  )

+ # from utils.ai_generator import (
+ # generate_ai_image,
+ # )
  from utils.version_info import (
  versions_html,
+ initialize_cuda,
+ release_torch_resources,
  get_torch_info
  )
  from utils.lora_details import (
  add_hex_text_option,
  custom_text_list,
  custom_text_color_list
+ )
  return grid_image

  def get_model_and_lora(model_textbox):
  default_model = model_textbox
  return default_model, []

+
+ def generate_input_image_click(map_option, prompt_textbox_value, negative_prompt_textbox_value, model_textbox_value, randomize_seed=True, seed=None, use_conditioned_image=False, strength=0.5, image_format="16:9", scale_factor=(8/3), progress=gr.Progress(track_tqdm=True)):
+ import spaces
+ from diffusers import FluxPipeline,FluxImg2ImgPipeline,FluxControlPipeline
+ import accelerate
+ from transformers import AutoTokenizer
+ from utils.lora_details import get_trigger_words, approximate_token_count, split_prompt_precisely
+ import gc
+ PIPELINE_CLASSES = {
+ "FluxPipeline": FluxPipeline,
+ "FluxImg2ImgPipeline": FluxImg2ImgPipeline,
+ "FluxControlPipeline": FluxControlPipeline
+ }
+ if randomize_seed:
+ seed = random.randint(0, constants.MAX_SEED)
+
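The PIPELINE_CLASSES table above replaces what would otherwise be a chain of if/elif branches: a name maps directly to a diffusers pipeline class, and an unknown name fails fast with the list of valid options (the ValueError a few lines below). A minimal standalone version of the same dispatch pattern, with stub classes standing in for the diffusers imports:

```python
# Self-contained sketch of the name-to-class pipeline dispatch used above.
class FluxPipeline: ...
class FluxImg2ImgPipeline: ...

PIPELINE_CLASSES = {
    "FluxPipeline": FluxPipeline,
    "FluxImg2ImgPipeline": FluxImg2ImgPipeline,
}

def get_pipeline_class(name: str):
    cls = PIPELINE_CLASSES.get(name)
    if cls is None:
        raise ValueError(f"Unsupported pipeline type '{name}'. "
                         f"Available options: {list(PIPELINE_CLASSES)}")
    return cls

assert get_pipeline_class("FluxPipeline") is FluxPipeline
```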
+ @spaces.GPU(progress=gr.Progress(track_tqdm=True))
+ def generate_image_lowmem(
+ text,
+ neg_prompt=None,
+ model_name="black-forest-labs/FLUX.1-dev",
+ lora_weights=None,
+ conditioned_image=None,
+ image_width=1368,
+ image_height=848,
+ guidance_scale=3.5,
+ num_inference_steps=30,
+ seed=0,
+ true_cfg_scale=1.0,
+ pipeline_name="FluxPipeline",
+ strength=0.75,
+ additional_parameters=None,
+ progress=gr.Progress(track_tqdm=True)
+ ):
+ from torch import cuda, bfloat16, float32, Generator, no_grad, backends
+ # Retrieve the pipeline class from the mapping
+ pipeline_class = PIPELINE_CLASSES.get(pipeline_name)
+ if not pipeline_class:
+ raise ValueError(f"Unsupported pipeline type '{pipeline_name}'. "
+ f"Available options: {list(PIPELINE_CLASSES.keys())}")
+
+ #initialize_cuda()
+ device = "cuda" if cuda.is_available() else "cpu"
+ from src.condition import Condition
+
+ print(f"device:{device}\nmodel_name:{model_name}\nlora_weights:{lora_weights}\n")
+ print(f"\n {get_torch_info()}\n")
+ # Disable gradient calculations
+ with no_grad():
+ # Initialize the pipeline inside the context manager
+ pipe = pipeline_class.from_pretrained(
+ model_name,
+ torch_dtype=bfloat16 if device == "cuda" else float32
+ ).to(device)
+ # Optionally, don't use CPU offload if not necessary
+
+ # alternative version that may be more efficient
+ # pipe.enable_sequential_cpu_offload()
+ if pipeline_name == "FluxPipeline":
+ pipe.enable_model_cpu_offload()
+ pipe.vae.enable_slicing()
+ pipe.vae.enable_tiling()
+ else:
+ pipe.enable_model_cpu_offload()
+
+ # Access the tokenizer from the pipeline
+ tokenizer = pipe.tokenizer
+
+ # Check if add_prefix_space is set and convert to slow tokenizer if necessary
+ if getattr(tokenizer, 'add_prefix_space', False):
+ tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False, device_map = 'cpu')
+ # Update the pipeline's tokenizer
+ pipe.tokenizer = tokenizer
+ pipe.to(device)
+
+ flash_attention_enabled = backends.cuda.flash_sdp_enabled()
+ if flash_attention_enabled == False:
+ #Enable xFormers memory-efficient attention (optional)
+ #pipe.enable_xformers_memory_efficient_attention()
+ print("\nEnabled xFormers memory-efficient attention.\n")
+ else:
+ pipe.attn_implementation="flash_attention_2"
+ print("\nEnabled flash_attention_2.\n")
+
+ condition_type = "subject"
+ # Load LoRA weights
+ # note: does not yet handle multiple LoRA weights with different names, needs .set_adapters(["depth", "hyper-sd"], adapter_weights=[0.85, 0.125])
+ if lora_weights:
+ for lora_weight in lora_weights:
+ lora_configs = constants.LORA_DETAILS.get(lora_weight, [])
+ lora_weight_set = False
+ if lora_configs:
+ for config in lora_configs:
+ # Load LoRA weights with optional weight_name and adapter_name
+ if 'weight_name' in config:
+ weight_name = config.get("weight_name")
+ adapter_name = config.get("adapter_name")
+ lora_collection = config.get("lora_collection")
+ if weight_name and adapter_name and lora_collection and lora_weight_set == False:
+ pipe.load_lora_weights(
+ lora_collection,
+ weight_name=weight_name,
+ adapter_name=adapter_name,
+ token=constants.HF_API_TOKEN
+ )
+ lora_weight_set = True
+ print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}, lora_collection={lora_collection}\n")
+ elif weight_name and adapter_name==None and lora_collection and lora_weight_set == False:
+ pipe.load_lora_weights(
+ lora_collection,
+ weight_name=weight_name,
+ token=constants.HF_API_TOKEN
+ )
+ lora_weight_set = True
+ print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}, lora_collection={lora_collection}\n")
+ elif weight_name and adapter_name and lora_weight_set == False:
+ pipe.load_lora_weights(
+ lora_weight,
+ weight_name=weight_name,
+ adapter_name=adapter_name,
+ token=constants.HF_API_TOKEN
+ )
+ lora_weight_set = True
+ print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}\n")
+ elif weight_name and adapter_name==None and lora_weight_set == False:
+ pipe.load_lora_weights(
+ lora_weight,
+ weight_name=weight_name,
+ token=constants.HF_API_TOKEN
+ )
+ lora_weight_set = True
+ print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}\n")
+ elif lora_weight_set == False:
+ pipe.load_lora_weights(
+ lora_weight,
+ token=constants.HF_API_TOKEN
+ )
+ lora_weight_set = True
+ print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}\n")
+ # Apply 'pipe' configurations if present
+ if 'pipe' in config:
+ pipe_config = config['pipe']
+ for method_name, params in pipe_config.items():
+ method = getattr(pipe, method_name, None)
+ if method:
+ print(f"Applying pipe method: {method_name} with params: {params}")
+ method(**params)
+ else:
+ print(f"Method {method_name} not found in pipe.")
+ if 'condition_type' in config:
+ condition_type = config['condition_type']
+ if condition_type == "coloring":
+ #pipe.enable_coloring()
+ print("\nEnabled coloring.\n")
+ elif condition_type == "deblurring":
+ #pipe.enable_deblurring()
+ print("\nEnabled deblurring.\n")
+ elif condition_type == "fill":
+ #pipe.enable_fill()
+ print("\nEnabled fill.\n")
+ elif condition_type == "depth":
+ #pipe.enable_depth()
+ print("\nEnabled depth.\n")
+ elif condition_type == "canny":
+ #pipe.enable_canny()
+ print("\nEnabled canny.\n")
+ elif condition_type == "subject":
+ #pipe.enable_subject()
+ print("\nEnabled subject.\n")
+ else:
+ print(f"Condition type {condition_type} not implemented.")
+ else:
+ pipe.load_lora_weights(lora_weight, use_auth_token=constants.HF_API_TOKEN)
+ # Set the random seed for reproducibility
+ generator = Generator(device=device).manual_seed(seed)
+ conditions = []
+ if conditioned_image is not None:
+ conditioned_image = crop_and_resize_image(conditioned_image, image_width, image_height)
+ condition = Condition(condition_type, conditioned_image)
+ conditions.append(condition)
+ print(f"\nAdded conditioned image.\n {conditioned_image.size}")
+ # Prepare the parameters for image generation
+ additional_parameters ={
+ "strength": strength,
+ "image": conditioned_image,
+ }
+ else:
+ print("\nNo conditioned image provided.")
+ if neg_prompt!=None:
+ true_cfg_scale=1.1
+ additional_parameters ={
+ "negative_prompt": neg_prompt,
+ "true_cfg_scale": true_cfg_scale,
+ }
+ # handle long prompts by splitting them
+ if approximate_token_count(text) > 76:
+ prompt, prompt2 = split_prompt_precisely(text)
+ prompt_parameters = {
+ "prompt" : prompt,
+ "prompt_2": prompt2
+ }
+ else:
+ prompt_parameters = {
+ "prompt" :text
+ }
+ additional_parameters.update(prompt_parameters)
+ # Combine all parameters
+ generate_params = {
+ "height": image_height,
+ "width": image_width,
+ "guidance_scale": guidance_scale,
+ "num_inference_steps": num_inference_steps,
+ "generator": generator, }
+ if additional_parameters:
+ generate_params.update(additional_parameters)
+ generate_params = {k: v for k, v in generate_params.items() if v is not None}
+ print(f"generate_params: {generate_params}")
+ # Generate the image
+ result = pipe(**generate_params)
+ image = result.images[0]
+ # Clean up
+ del result
+ del conditions
+ del generator
+ # Delete the pipeline and clear cache
+ del pipe
+ cuda.empty_cache()
+ cuda.ipc_collect()
+ print(cuda.memory_summary(device=None, abbreviated=False))
+
+ return image
+
+ #@spaces.GPU(progress=gr.Progress(track_tqdm=True))
398
+ def generate_ai_image_local (
399
+ map_option,
400
+ prompt_textbox_value,
401
+ neg_prompt_textbox_value,
402
+ model="black-forest-labs/FLUX.1-dev",
403
+ lora_weights=None,
404
+ conditioned_image=None,
405
+ height=512,
406
+ width=912,
407
+ num_inference_steps=30,
408
+ guidance_scale=3.5,
409
+ seed=777,
410
+ pipeline_name="FluxPipeline",
411
+ strength=0.75,
412
+ progress=gr.Progress(track_tqdm=True)
413
+ ):
414
+ print(f"Generating image with lowmem")
415
+ try:
416
+ if map_option != "Prompt":
417
+ prompt = constants.PROMPTS[map_option]
418
+ negative_prompt = constants.NEGATIVE_PROMPTS.get(map_option, "")
419
+ else:
420
+ prompt = prompt_textbox_value
421
+ negative_prompt = neg_prompt_textbox_value or ""
422
+ #full_prompt = f"{prompt} {negative_prompt}"
423
+ additional_parameters = {}
424
+ if lora_weights:
425
+ for lora_weight in lora_weights:
426
+ lora_configs = constants.LORA_DETAILS.get(lora_weight, [])
427
+ for config in lora_configs:
428
+ if 'parameters' in config:
429
+ additional_parameters.update(config['parameters'])
430
+ elif 'trigger_words' in config:
431
+ trigger_words = get_trigger_words(lora_weight)
432
+ prompt = f"{trigger_words} {prompt}"
433
+ for key, value in additional_parameters.items():
434
+ if key in ['height', 'width', 'num_inference_steps', 'max_sequence_length']:
435
+ additional_parameters[key] = int(value)
436
+ elif key in ['guidance_scale','true_cfg_scale']:
437
+ additional_parameters[key] = float(value)
438
+ height = additional_parameters.pop('height', height)
439
+ width = additional_parameters.pop('width', width)
440
+ num_inference_steps = additional_parameters.pop('num_inference_steps', num_inference_steps)
441
+ guidance_scale = additional_parameters.pop('guidance_scale', guidance_scale)
442
+ print("Generating image with the following parameters:")
443
+ print(f"Model: {model}")
444
+ print(f"LoRA Weights: {lora_weights}")
445
+ print(f"Prompt: {prompt}")
446
+ print(f"Neg Prompt: {negative_prompt}")
447
+ print(f"Height: {height}")
448
+ print(f"Width: {width}")
449
+ print(f"Number of Inference Steps: {num_inference_steps}")
450
+ print(f"Guidance Scale: {guidance_scale}")
451
+ print(f"Seed: {seed}")
452
+ print(f"Additional Parameters: {additional_parameters}")
453
+ print(f"Conditioned Image: {conditioned_image}")
454
+ print(f"Conditioned Image Strength: {strength}")
455
+ print(f"pipeline: {pipeline_name}")
456
+ image = generate_image_lowmem(
457
+ text=prompt,
458
+ model_name=model,
459
+ neg_prompt=negative_prompt,
460
+ lora_weights=lora_weights,
461
+ conditioned_image=conditioned_image,
462
+ image_width=width,
463
+ image_height=height,
464
+ guidance_scale=guidance_scale,
465
+ num_inference_steps=num_inference_steps,
466
+ seed=seed,
467
+ pipeline_name=pipeline_name,
468
+ strength=strength,
469
+ additional_parameters=additional_parameters
470
+ )
471
+ with NamedTemporaryFile(delete=False, suffix=".png") as tmp:
472
+ image.save(tmp.name, format="PNG")
473
+ constants.temp_files.append(tmp.name)
474
+ print(f"Image saved to {tmp.name}")
475
+ release_torch_resources()
476
+ gc.collect()
477
+ return tmp.name
478
+ except Exception as e:
479
+ print(f"Error generating AI image: {e}")
480
+ release_torch_resources()
481
+ gc.collect()
482
+ return None
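
Inside generate_image_lowmem, prompts longer than roughly 76 tokens are split with approximate_token_count and split_prompt_precisely (imported from utils.lora_details, bodies not shown in this diff), and the overflow is routed to prompt_2. In FLUX pipelines the primary prompt goes through the CLIP text encoder, which caps out near 77 tokens, while prompt_2 is handled by the T5 encoder with a much longer limit. A rough, whitespace-based stand-in for the real helpers, which may well use an actual tokenizer:

```python
# Crude approximations of the two prompt helpers referenced above.
def approximate_token_count(text: str) -> int:
    return len(text.split())  # word count as a proxy for tokenizer tokens

def split_prompt_precisely(text: str, limit: int = 76) -> tuple[str, str]:
    """Split so the first part stays under the CLIP token budget."""
    words = text.split()
    return " ".join(words[:limit]), " ".join(words[limit:])
```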
  # Get the model and LoRA weights
  model, lora_weights = get_model_and_lora(model_textbox_value)
  global current_prerendered_image

  if use_conditioned_image:
  print(f"Conditioned path: {current_prerendered_image.value}.. converting to RGB\n")
  # ensure the conditioned image is an image and not a string, cannot use RGBA
+ if isinstance(current_prerendered_image.value, str):
  conditioned_image = open_image(current_prerendered_image.value).convert("RGB")
  print(f"Conditioned Image: {conditioned_image.size}.. converted to RGB\n")

  width_ratio, height_ratio = map(int, image_format.split(":"))
  aspect_ratio = width_ratio / height_ratio

+ width, height = convert_ratio_to_dimensions(aspect_ratio, 576)
+ pipeline = "FluxPipeline"
+ if conditioned_image is not None:
+ pipeline = "FluxImg2ImgPipeline"
  # Generate the AI image and get the image path
+ image_path = generate_ai_image_local(
  map_option,
  prompt_textbox_value,
  negative_prompt_textbox_value,
  model,
  lora_weights,
  conditioned_image,
+ strength=strength,
  height=height,
  width=width,
+ seed=seed,
+ pipeline_name=pipeline,
  )

  # Open the generated image

  upscaled_image.save(tmp_upscaled.name, format="PNG")
  constants.temp_files.append(tmp_upscaled.name)
  print(f"Upscaled image saved to {tmp_upscaled.name}")
+
  # Return the path of the upscaled image
  return tmp_upscaled.name
 
  print(f"Combining images with alpha: {alpha}")
  return lerp_imagemath(in_image, out_image, alpha)

+ def add_border(image, mask_width, mask_height, blank_color):
+ #install_torch()
  bordered_image_output = Image.open(image).convert("RGBA")
  margin_color = detect_color_format(blank_color)
  print(f"Adding border to image with width: {mask_width}, height: {mask_height}, color: {margin_color}")
  return shrink_and_paste_on_blank(bordered_image_output, mask_width, mask_height, margin_color)
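add_border delegates to shrink_and_paste_on_blank, which is imported from the repository's utilities and not shown in this diff. Judging by the call site, it shrinks the source image by the margin amounts and centers it on a solid canvas of the margin color. A hypothetical PIL equivalent of that behavior (valid when each margin is less than half the corresponding dimension):

```python
# Hypothetical sketch of shrink_and_paste_on_blank as used by add_border above.
from PIL import Image

def shrink_and_paste_on_blank(img: Image.Image, margin_w: int, margin_h: int,
                              color: tuple[int, int, int, int]) -> Image.Image:
    w, h = img.size
    inner = img.resize((w - 2 * margin_w, h - 2 * margin_h), Image.LANCZOS)
    canvas = Image.new("RGBA", (w, h), color)
    canvas.paste(inner, (margin_w, margin_h), inner)  # use alpha as the mask
    return canvas
```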
+ def main(debug):
569
+ title = "HexaGrid Creator"
570
+ #description = "Customizable Hexagon Grid Image Generator"
571
+ examples = [["assets//examples//hex_map_p1.png", 32, 1, 0, 0, 0, 0, 0, "#ede9ac44","#12165380", True]]
572
 
573
+ gr.set_static_paths(paths=["images/","images/images","images/prerendered","LUT/","fonts/"])
 
 
574
 
575
+ # Gradio Blocks Interface
576
+ with gr.Blocks(css_paths="style_20250128.css", title=title, theme='Surn/beeuty') as beeuty:
577
+ with gr.Row():
578
+ gr.Markdown("""
579
+ # HexaGrid Creator
580
+ ## Transform Your Images into Mesmerizing Hexagon Grid Masterpieces! ⬢""", elem_classes="intro")
581
+ with gr.Row():
582
+ with gr.Accordion("Welcome to HexaGrid Creator, the ultimate tool for transforming your images into stunning hexagon grid artworks. Whether you're a tabletop game enthusiast, a digital artist, or someone who loves unique patterns, HexaGrid Creator has something for you.", open=False, elem_classes="intro"):
583
+ gr.Markdown ("""
 
584
 
585
+ ## Drop an image into the Input Image and get started!
586
 
587
 
588
 
589
+ ## What is HexaGrid Creator?
590
+ HexaGrid Creator is a web-based application that allows you to apply a hexagon grid overlay to any image. You can customize the size, color, and opacity of the hexagons, as well as the background and border colors. The result is a visually striking image that looks like it was made from hexagonal tiles!
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
591
 
592
+ ### What Can You Do?
593
+ - **Generate Hexagon Grids:** Create beautiful hexagon grid overlays on any image with fully customizable parameters.
594
+ - **AI-Powered Image Generation:** Use advanced AI models to generate images based on your prompts and apply hexagon grids to them.
595
+ - **Color Exclusion:** Select and exclude specific colors from your hexagon grid for a cleaner and more refined look.
596
+ - **Interactive Customization:** Adjust hexagon size, border size, rotation, background color, and more in real-time.
597
+ - **Depth and 3D Model Generation:** Generate depth maps and 3D models from your images for enhanced visualization.
598
+ - **Image Filter [Look-Up Table (LUT)] Application:** Apply filters (LUTs) to your images for color grading and enhancement.
599
+ - **Pre-rendered Maps:** Access a library of pre-rendered hexagon maps for quick and easy customization.
600
+ - **Add Margins:** Add customizable margins around your images for a polished finish.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
601
 
602
+ ### Why You'll Love It
603
+ - **Fun and Easy to Use:** With an intuitive interface and real-time previews, creating hexagon grids has never been this fun!
604
+ - **Endless Creativity:** Unleash your creativity with endless customization options and see your images transform in unique ways.
605
+ - **Hexagon-Inspired Theme:** Enjoy a delightful yellow and purple theme inspired by hexagons! ⬢
606
+ - **Advanced AI Models:** Leverage advanced AI models and LoRA weights for high-quality image generation and customization.
607
+
608
+ ### Get Started
609
+ 1. **Upload or Generate an Image:** Start by uploading your own image or generate one using our AI-powered tool.
610
+ 2. **Customize Your Grid:** Play around with the settings to create the perfect hexagon grid overlay.
611
+ 3. **Download and Share:** Once you're happy with your creation, download it and share it with the world!
612
+
613
+ ### Advanced Features
614
+ - **Generative AI Integration:** Utilize models like `black-forest-labs/FLUX.1-dev` and various LoRA weights for generating unique images.
615
+ - **Pre-rendered Maps:** Access a library of pre-rendered hexagon maps for quick and easy customization.
616
+ - **Image Filter [Look-Up Table (LUT)] Application:** Apply filters (LUTs) to your images for color grading and enhancement.
617
+ - **Depth and 3D Model Generation:** Create depth maps and 3D models from your images for enhanced visualization.
618
+ - **Add Margins:** Customize margins around your images for a polished finish.
619
+
620
+ Join the hive and start creating with HexaGrid Creator today!
621
+
622
+ """, elem_classes="intro")
623
+ with gr.Row():
624
+ from utils.image_utils import convert_to_rgba_png
625
+
626
+ # Existing code
627
+ with gr.Column(scale=2):
628
+ input_image = gr.Image(
629
+ label="Input Image",
630
+ type="filepath",
631
+ interactive=True,
632
+ elem_classes="centered solid imgcontainer",
633
+ key="imgInput",
634
+ image_mode=None,
635
+ format="PNG",
636
+ show_download_button=True,
637
  )
638
 
639
+ # New code to convert input image to RGBA PNG
640
+ def on_input_image_change(image_path):
641
+ if image_path is None:
642
+ gr.Warning("Please upload an Input Image to get started.")
643
+ return None
644
+ img, img_path = convert_to_rgba_png(image_path)
645
+ return img_path
646
+
647
+ input_image.change(
648
+ fn=on_input_image_change,
649
+ inputs=[input_image],
650
+ outputs=[input_image], scroll_to_output=True,
651
+ )
652
+ with gr.Column():
653
+ with gr.Accordion("Hex Coloring and Exclusion", open = False):
 
 
 
654
  with gr.Row():
655
  with gr.Column():
656
+ color_picker = gr.ColorPicker(label="Pick a color to exclude",value="#505050")
657
+ with gr.Column():
658
+ filter_color = gr.Checkbox(label="Filter Excluded Colors from Sampling", value=False,)
659
+ exclude_color_button = gr.Button("Exclude Color", elem_id="exlude_color_button", elem_classes="solid")
660
+ color_display = gr.DataFrame(label="List of Excluded RGBA Colors", headers=["R", "G", "B", "A"], elem_id="excluded_colors", type="array", value=build_dataframe(excluded_color_list), interactive=True, elem_classes="solid centered")
661
+ selected_row = gr.Number(0, label="Selected Row", visible=False)
662
+ delete_button = gr.Button("Delete Row", elem_id="delete_exclusion_button", elem_classes="solid")
663
+ fill_hex = gr.Checkbox(label="Fill Hex with color from Image", value=True)
664
+ with gr.Accordion("Image Filters", open = False):
665
+ with gr.Row():
666
+ with gr.Column():
667
+ composite_color = gr.ColorPicker(label="Color", value="#ede9ac44")
668
+ with gr.Column():
669
+ composite_opacity = gr.Slider(label="Opacity %", minimum=0, maximum=100, value=50, interactive=True)
670
+ with gr.Row():
671
+ composite_button = gr.Button("Composite", elem_classes="solid")
672
+ with gr.Row():
673
+ with gr.Column():
674
+ lut_filename = gr.Textbox(
675
+ value="",
676
+ label="Look Up Table (LUT) File Name",
677
+ elem_id="lutFileName")
678
+ with gr.Column():
679
+ lut_file = gr.File(
680
+ value=None,
681
+ file_count="single",
682
+ file_types=[".cube"],
683
+ type="filepath",
684
+ label="LUT cube File")
685
+ with gr.Row():
686
+ lut_example_image = gr.Image(type="pil", label="Filter (LUT) Example Image", value=constants.default_lut_example_img)
687
+ with gr.Row():
688
  with gr.Column():
689
+ gr.Markdown("""
690
+ ### Included Filters (LUTs)
691
+ There are several included Filters:
692
+
693
+ Try them on the example image before applying to your Input Image.
694
+ """, elem_id="lut_markdown")
695
  with gr.Column():
696
+ gr.Examples(elem_id="lut_examples",
697
+ examples=[[f] for f in constants.lut_files],
698
+ inputs=[lut_filename],
699
+ outputs=[lut_filename],
700
+ label="Select a Filter (LUT) file. Populate the LUT File Name field"
 
 
701
  )
702
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
703
  with gr.Row():
704
+ apply_lut_button = gr.Button("Apply Filter (LUT)", elem_classes="solid", elem_id="apply_lut_button")
705
+
706
+ lut_file.change(get_filename, inputs=[lut_file], outputs=[lut_filename])
707
+ lut_filename.change(show_lut, inputs=[lut_filename, lut_example_image], outputs=[lut_example_image])
708
+ apply_lut_button.click(
709
+ lambda lut_filename, input_image: gr.Warning("Please upload an Input Image to get started.") if input_image is None else apply_lut_to_image_path(lut_filename, input_image)[0],
710
+ inputs=[lut_filename, input_image],
711
+ outputs=[input_image],
712
+ scroll_to_output=True
713
  )
714
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
715
  with gr.Row():
716
+ with gr.Accordion("Generative AI", open = False):
717
+ with gr.Row():
718
+ with gr.Column():
719
+ model_options = gr.Dropdown(
720
+ label="Model Options",
721
+ choices=constants.MODELS + constants.LORA_WEIGHTS + ["Manual Entry"],
722
+ value="Cossale/Frames2-Flex.1",
723
+ elem_classes="solid"
724
+ )
725
+ model_textbox = gr.Textbox(
726
+ label="LORA/Model",
727
+ value="Cossale/Frames2-Flex.1",
728
+ elem_classes="solid",
729
+ elem_id="inference_model",
730
+ visible=False
731
+ )
732
+ # Update map_options to a Dropdown with choices from constants.PROMPTS keys
733
+ with gr.Row():
734
+ with gr.Column():
735
+ map_options = gr.Dropdown(
736
+ label="Map Options",
737
+ choices=list(constants.PROMPTS.keys()),
738
+ value="Alien Landscape",
739
+ elem_classes="solid",
740
+ scale=0
741
+ )
742
+ with gr.Column():
743
+ # Add Dropdown for sizing of Images, height and width based on selection. Options are 16x9, 16x10, 4x5, 1x1
744
+ # The values of height and width are based on common resolutions for each aspect ratio
745
+ # Default to 16x9, 912x512
746
+ image_size_ratio = gr.Dropdown(label="Image Size", choices=["16:9", "16:10", "4:5", "4:3", "2:1","3:2","1:1", "9:16", "10:16", "5:4", "3:4","1:2", "2:3"], value="16:9", elem_classes="solid", type="value", scale=0, interactive=True)
747
+ with gr.Column():
748
+ seed_slider = gr.Slider(
749
+ label="Seed",
750
+ minimum=0,
751
+ maximum=constants.MAX_SEED,
752
+ step=1,
753
+ value=0,
754
+ scale=0
755
+ )
756
+ randomize_seed = gr.Checkbox(label="Randomize seed", value=True, scale=0, interactive=True)
757
+ prompt_textbox = gr.Textbox(
758
+ label="Prompt",
759
+ visible=False,
760
+ elem_classes="solid",
761
+ value="top-down, (rectangular tabletop_map) alien planet map, Battletech_boardgame scifi world with forests, lakes, oceans, continents and snow at the top and bottom, (middle is dark, no_reflections, no_shadows), from directly above. From 100,000 feet looking straight down",
762
+ lines=4
763
+ )
764
+ negative_prompt_textbox = gr.Textbox(
765
+ label="Negative Prompt",
766
+ visible=False,
767
+ elem_classes="solid",
768
+ value="Earth, low quality, bad anatomy, blurry, cropped, worst quality, shadows, people, humans, reflections, shadows, realistic map of the Earth, isometric, text"
769
+ )
770
+ prompt_notes_label = gr.Label(
771
+ "You should use FRM$ as trigger words. @1.5 minutes",
772
+ elem_classes="solid centered small",
773
+ show_label=False,
774
+ visible=False
775
+ )
776
+ # Keep the change event to maintain functionality
777
+ map_options.change(
778
+ fn=update_prompt_visibility,
779
+ inputs=[map_options],
780
+ outputs=[prompt_textbox, negative_prompt_textbox, prompt_notes_label]
781
+ )
782
+ with gr.Row():
783
+ generate_input_image = gr.Button(
784
+ "Generate AI Image",
785
+ elem_id="generate_input_image",
786
+ elem_classes="solid"
787
+ )
788
+ with gr.Column(scale=2):
789
+ with gr.Accordion("Template Image Styles", open = False):
790
+ with gr.Row():
791
+ # Gallery from PRE_RENDERED_IMAGES GOES HERE
792
+ prerendered_image_gallery = gr.Gallery(label="Image Gallery", show_label=True, value=build_prerendered_images(constants.pre_rendered_maps_paths), elem_id="gallery", elem_classes="solid", type="filepath", columns=[3], rows=[3], preview=False ,object_fit="contain", height="auto", format="png",allow_preview=False)
793
+ with gr.Row():
794
+ image_guidance_strength = gr.Slider(label="Image Guidance Strength", minimum=0, maximum=1.0, value=0.25, step=0.01, interactive=True)
795
+ with gr.Column():
796
+ replace_input_image_button = gr.Button(
797
+ "Replace Input Image",
798
+ elem_id="prerendered_replace_input_image_button",
799
+ elem_classes="solid"
800
+ )
801
+ with gr.Column():
802
+ generate_input_image_from_gallery = gr.Button(
803
+ "Generate AI Image from Gallery",
804
+ elem_id="generate_input_image_from_gallery",
805
+ elem_classes="solid"
806
+ )
807
+
808
+ with gr.Accordion("Advanced Hexagon Settings", open=False):
809
+ with gr.Row():
810
+ start_x = gr.Number(label="Start X", value=0, minimum=-512, maximum= 512, precision=0)
811
+ start_y = gr.Number(label="Start Y", value=0, minimum=-512, maximum= 512, precision=0)
812
+ end_x = gr.Number(label="End X", value=0, minimum=-512, maximum= 512, precision=0)
813
+ end_y = gr.Number(label="End Y", value=0, minimum=-512, maximum= 512, precision=0)
814
+ with gr.Row():
815
+ x_spacing = gr.Number(label="Adjust Horizontal spacing", value=-1, minimum=-200, maximum=200, precision=1)
816
+ y_spacing = gr.Number(label="Adjust Vertical spacing", value=1, minimum=-200, maximum=200, precision=1)
817
+ with gr.Row():
818
+ rotation = gr.Slider(-90, 180, 0.0, 0.1, label="Hexagon Rotation (degree)")
819
+ add_hex_text = gr.Dropdown(label="Add Text to Hexagons", choices=[None, "Row-Column Coordinates", "Sequential Numbers", "Playing Cards Sequential", "Playing Cards Alternate Red and Black", "Custom List"], value=None)
820
+ with gr.Row():
821
+ custom_text_list = gr.TextArea(label="Custom Text List", value=constants.cards_alternating, visible=False,)
822
+ custom_text_color_list = gr.TextArea(label="Custom Text Color List", value=constants.card_colors_alternating, visible=False)
823
+ with gr.Row():
824
+ hex_text_info = gr.Markdown("""
825
+ ### Text Color uses the Border Color and Border Opacity, unless you use a custom list.
826
+ ### The Custom Text List and Custom Text Color List are comma-separated lists.
827
+ ### The custom color list is a comma-separated list of hex colors.
828
+ #### Example: "A,2,3,4,5,6,7,8,9,10,J,Q,K", "red,#0000FF,#00FF00,red,#FFFF00,#00FFFF,#FF8000,#FF00FF,#FF0080,#FF8000,#FF0080,lightblue"
829
+ """, elem_id="hex_text_info", visible=False)
830
+ add_hex_text.change(
831
+ fn=lambda x: (
832
+ gr.update(visible=(x == "Custom List")),
833
+ gr.update(visible=(x == "Custom List")),
834
+ gr.update(visible=(x is not None))
835
+ ),
836
+ inputs=add_hex_text,
837
+ outputs=[custom_text_list, custom_text_color_list, hex_text_info]
838
+ )
839
  with gr.Row():
840
+ hex_size = gr.Number(label="Hexagon Size", value=32, minimum=1, maximum=768)
841
+ border_size = gr.Slider(-5,25,value=0,step=1,label="Border Size")
842
+ with gr.Row():
843
+ background_color = gr.ColorPicker(label="Background Color", value="#000000", interactive=True)
844
+ background_opacity = gr.Slider(0,100,0,1,label="Background Opacity %")
845
+ border_color = gr.ColorPicker(label="Border Color", value="#7b7b7b", interactive=True)
846
+ border_opacity = gr.Slider(0,100,0,1,label="Border Opacity %")
847
  with gr.Row():
848
+ hex_button = gr.Button("Generate Hex Grid!", elem_classes="solid", elem_id="btn-generate")
 
849
  with gr.Row():
850
+ output_image = gr.Image(label="Hexagon Grid Image", image_mode = "RGBA", show_download_button=True, show_share_button=True,elem_classes="centered solid imgcontainer", format="PNG", type="filepath", key="ImgOutput")
851
+ overlay_image = gr.Image(label="Hexagon Overlay Image", image_mode = "RGBA", show_share_button=True, elem_classes="centered solid imgcontainer", format="PNG", type="filepath", key="ImgOverlay")
852
  with gr.Row():
853
+ output_overlay_composite = gr.Slider(0,100,50,0.5, label="Interpolate Intensity")
854
+ output_blend_multiply_composite = gr.Slider(0,100,50,0.5, label="Overlay Intensity")
855
+ output_alpha_composite = gr.Slider(0,100,50,0.5, label="Alpha Composite Intensity")
856
+ with gr.Accordion("Add Margins (bleed)", open=False):
857
+ with gr.Row():
858
+ border_image_source = gr.Radio(label="Add Margins around which Image", choices=["Input Image", "Overlay Image"], value="Overlay Image")
859
+ with gr.Row():
860
+ mask_width = gr.Number(label="Margins Width", value=10, minimum=0, maximum=100, precision=0)
861
+ mask_height = gr.Number(label="Margins Height", value=10, minimum=0, maximum=100, precision=0)
862
+ with gr.Row():
863
+ margin_color = gr.ColorPicker(label="Margin Color", value="#333333FF", interactive=True)
864
+ margin_opacity = gr.Slider(0,100,95,0.5,label="Margin Opacity %")
865
+ with gr.Row():
866
+ add_border_button = gr.Button("Add Margins", elem_classes="solid", variant="secondary")
867
+ with gr.Row():
868
+ bordered_image_output = gr.Image(label="Image with Margins", image_mode="RGBA", show_download_button=True, show_share_button=True, elem_classes="centered solid imgcontainer", format="PNG", type="filepath", key="ImgBordered")
869
 
870
+ with gr.Accordion("Height Maps and 3D", open=False):
871
+ with gr.Row():
872
+ with gr.Column():
873
+ voxel_size_factor = gr.Slider(label="Voxel Size Factor", value=1.00, minimum=0.01, maximum=40.00, step=0.01)
874
+ with gr.Column():
875
+ depth_image_source = gr.Radio(label="Depth Image Source", choices=["Input Image", "Output Image", "Overlay Image","Image with Margins"], value="Input Image")
876
+ with gr.Row():
877
+ generate_depth_button = gr.Button("Generate Depth Map and 3D Model From Selected Image", elem_classes="solid", variant="secondary")
878
+ with gr.Row():
879
+ depth_map_output = gr.Image(label="Depth Map", image_mode="L", elem_classes="centered solid imgcontainer", format="PNG", type="filepath", key="ImgDepth")
880
+ model_output = gr.Model3D(label="3D Model", clear_color=[1.0, 1.0, 1.0, 0.25], key="Img3D", elem_classes="centered solid imgcontainer")
881
  with gr.Row():
882
+ gr.Examples(examples=[
883
+ ["assets//examples//hex_map_p1.png", False, True, -32,-31,80,80,-1.8,0,35,0,1,"#FFD0D0", 15],
884
+ ["assets//examples//hex_map_p1_overlayed.png", False, False, -32,-31,80,80,-1.8,0,35,0,1,"#FFD0D0", 75],
885
+ ["assets//examples//hex_flower_logo.png", False, True, -95,-95,100,100,-24,-2,190,30,2,"#FF8951", 50],
886
+ ["assets//examples//hexed_fract_1.png", False, True, 0,0,0,0,0,0,10,0,0,"#000000", 5],
887
+ ["assets//examples//tmpzt3mblvk.png", False, True, -20,10,0,0,-6,-2,35,30,1,"#ffffff", 0],
888
+ ],
889
+ inputs=[input_image, filter_color, fill_hex, start_x, start_y, end_x, end_y, x_spacing, y_spacing, hex_size, rotation, border_size, border_color, border_opacity],
890
+ elem_id="examples")
891
+ #with gr.Row():
892
+ #gr.HTML(value=versions_html(), visible=True, elem_id="versions")
893
  with gr.Row():
894
+ reinstall_torch = gr.Button("Reinstall Torch", elem_classes="solid small", variant="secondary")
895
+ reinstall_cuda_toolkit = gr.Button("Install CUDA Toolkit", elem_classes="solid small", variant="secondary")
896
+ reinitialize_cuda = gr.Button("Reinitialize CUDA", elem_classes="solid small", variant="secondary")
897
+ torch_release = gr.Button("Release Torch Resources", elem_classes="solid small", variant="secondary")
 
898
 
899
+ reinitialize_cuda.click(
900
+ fn=initialize_cuda,
901
+ inputs=[],
902
+ outputs=[]
903
+ )
904
+ torch_release.click(
905
+ fn=release_torch_resources,
906
+ inputs=[],
907
+ outputs=[]
908
+ )
909
+ reinstall_torch.click(
910
+ fn=install_torch,
911
+ inputs=[],
912
+ outputs=[]
913
+ )
914
 
915
+ reinstall_cuda_toolkit.click(
916
+ fn=install_cuda_toolkit,
917
+ inputs=[],
918
+ outputs=[]
919
+ )
920
+
921
+ color_display.select(on_color_display_select,inputs=[color_display], outputs=[selected_row])
922
+ color_display.input(on_input,inputs=[color_display], outputs=[color_display, gr.State(excluded_color_list)])
923
+
924
+ delete_button.click(fn=delete_color, inputs=[selected_row, color_display], outputs=[color_display])
925
+ exclude_color_button.click(fn=add_color, inputs=[color_picker, gr.State(excluded_color_list)], outputs=[color_display, gr.State(excluded_color_list)])
926
+ hex_button.click(
927
+ fn=lambda hex_size, border_size, input_image, start_x, start_y, end_x, end_y, rotation, background_color, background_opacity, border_color, border_opacity, fill_hex, color_display, filter_color, x_spacing, y_spacing, add_hex_text, custom_text_list, custom_text_color_list:
928
+ gr.Warning("Please upload an Input Image to get started.") if input_image is None else hex_create(hex_size, border_size, input_image, start_x, start_y, end_x, end_y, rotation, background_color, background_opacity, border_color, border_opacity, fill_hex, color_display, filter_color, x_spacing, y_spacing, add_hex_text, custom_text_list, custom_text_color_list),
929
+ inputs=[hex_size, border_size, input_image, start_x, start_y, end_x, end_y, rotation, background_color, background_opacity, border_color, border_opacity, fill_hex, color_display, filter_color, x_spacing, y_spacing, add_hex_text, custom_text_list, custom_text_color_list],
930
+ outputs=[output_image, overlay_image],
931
+ scroll_to_output=True
932
+ )
933
+ generate_input_image.click(
934
+ fn=generate_input_image_click,
935
+ inputs=[map_options, prompt_textbox, negative_prompt_textbox, model_textbox, randomize_seed, seed_slider, gr.State(False), gr.State(0.5), image_size_ratio],
936
+ outputs=[input_image], scroll_to_output=True
937
+ )
938
+ generate_depth_button.click(
939
+ fn=generate_depth_button_click,
940
+ inputs=[depth_image_source, voxel_size_factor, input_image, output_image, overlay_image, bordered_image_output],
941
+ outputs=[depth_map_output, model_output], scroll_to_output=True
942
+ )
943
+ model_textbox.change(
944
+ fn=update_prompt_notes,
945
+ inputs=model_textbox,
946
+ outputs=prompt_notes_label,preprocess=False
947
+ )
948
+ model_options.change(
949
+ fn=lambda x: (gr.update(visible=(x == "Manual Entry")), gr.update(value=x) if x != "Manual Entry" else gr.update()),
950
+ inputs=model_options,
951
+ outputs=[model_textbox, model_textbox]
952
+ )
953
+ model_options.change(
954
+ fn=update_prompt_notes,
955
+ inputs=model_options,
956
+ outputs=prompt_notes_label
957
+ )
958
+ composite_button.click(
959
+ fn=lambda input_image, composite_color, composite_opacity: gr.Warning("Please upload an Input Image to get started.") if input_image is None else change_color(input_image, composite_color, composite_opacity),
960
+ inputs=[input_image, composite_color, composite_opacity],
961
+ outputs=[input_image]
962
+ )
963
+
964
+ #use conditioned_image as the input_image for generate_input_image_click
965
+ generate_input_image_from_gallery.click(
966
+ fn=generate_input_image_click,
967
+ inputs=[map_options, prompt_textbox, negative_prompt_textbox, model_textbox, randomize_seed, seed_slider, gr.State(True), image_guidance_strength, image_size_ratio],
968
+ outputs=[input_image], scroll_to_output=True
969
+ )
970
+
971
+ # Update the state variable with the prerendered image filepath when an image is selected
972
+ prerendered_image_gallery.select(
973
+ fn=on_prerendered_gallery_selection,
974
+ inputs=None,
975
+ outputs=[gr.State(current_prerendered_image)], # Update the state with the selected image
976
+ show_api=False
977
+ )
978
+ # replace input image with selected gallery image
979
+ replace_input_image_button.click(
980
+ lambda: current_prerendered_image.value,
981
+ inputs=None,
982
+ outputs=[input_image], scroll_to_output=True
983
+ )
984
+ output_overlay_composite.change(
985
+ fn=combine_images_with_lerp,
986
+ inputs=[input_image, output_image, output_overlay_composite],
987
+ outputs=[overlay_image], scroll_to_output=True
988
+ )
989
+ output_blend_multiply_composite.change(
990
+ fn=multiply_and_blend_images,
991
+ inputs=[input_image, output_image, output_blend_multiply_composite],
992
+ outputs=[overlay_image],
993
+ scroll_to_output=True
994
+ )
995
+ output_alpha_composite.change(
996
+ fn=alpha_composite_with_control,
997
+ inputs=[input_image, output_image, output_alpha_composite],
998
+ outputs=[overlay_image],
999
+ scroll_to_output=True
1000
+ )
1001
+ add_border_button.click(
1002
+ fn=lambda image_source, mask_w, mask_h, color, opacity, input_img, overlay_img: add_border(input_img if image_source == "Input Image" else overlay_img, mask_w, mask_h, update_color_opacity(detect_color_format(color), opacity * 2.55)),
1003
+ inputs=[border_image_source, mask_width, mask_height, margin_color, margin_opacity, input_image, overlay_image],
1004
+ outputs=[bordered_image_output],
1005
+ scroll_to_output=True
1006
+ )
1008
  beeuty.queue(default_concurrency_limit=1,max_size=12,api_open=False)
1009
+ beeuty.launch(allowed_paths=["assets","/","./assets","images","./images", "./images/prerendered"], favicon_path="./assets/favicon.ico", max_file_size="10mb")
1010
+
1011
+
1012
+ if __name__ == "__main__":
1013
+ logging.basicConfig(
1014
+ format="[%(levelname)s] %(asctime)s %(message)s", level=logging.INFO
1015
+ )
1016
+ logging.info("Environment Variables: %s" % os.environ)
1017
+ if _get_output(["nvcc", "--version"]) is None:
1018
+ logging.info("Installing CUDA toolkit...")
1019
+ install_cuda_toolkit()
1020
+ else:
1021
+ logging.info("Detected CUDA: %s" % _get_output(["nvcc", "--version"]))
1022
+
1023
+ logging.info("Installing CUDA extensions...")
1024
+ setup_runtime_env()
1025
+ main(os.getenv("DEBUG") == "1")
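
For reference, here is a minimal, self-contained sketch of the guard-lambda wiring pattern that `hex_button.click` and `composite_button.click` use above: the lambda short-circuits to `gr.Warning` when no input image is present, so the expensive handler never runs on empty input. The component names and the stub function are illustrative, not the app's exact code.

```python
import gradio as gr

def hex_create_stub(hex_size, input_image):
    # Stand-in for the real hex_create(); assume it returns a processed image path.
    return input_image

with gr.Blocks() as demo:
    input_image = gr.Image(type="filepath", label="Input Image")
    hex_size = gr.Number(value=32, label="Hexagon Size")
    output_image = gr.Image(label="Result")
    hex_button = gr.Button("Generate Hex Grid!")
    hex_button.click(
        # Guard: surface a warning instead of calling the handler with no image.
        fn=lambda size, img: gr.Warning("Please upload an Input Image to get started.")
        if img is None else hex_create_stub(size, img),
        inputs=[hex_size, input_image],
        outputs=[output_image],
        scroll_to_output=True,
    )

demo.launch()
```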
pre-requirements.txt CHANGED
@@ -1 +1,5 @@
1
- pip>=25.0
 
 
 
 
 
1
+ pip>=25.0.1
2
+ #torch==2.4.0 --extra-index-url https://download.pytorch.org/whl/cu124
3
+ #torch-2.4.0%2Bcu124-cp310-cp310-linux_x86_64.whl#sha256=2cb28155635e3d3d0be198e3f3e7457a1d7b99e8c2eedc73fe22fab574d11a4c
4
+ #torchvision==0.19.0 --extra-index-url https://download.pytorch.org/whl/cu124
5
+ #/torchvision-0.19.0%2Bcu124-cp310-cp310-linux_x86_64.whl#sha256=82cf10450537aeb9584ceaf53633f177bb809d563c5d64526f4b9be7668b2769
requirements.txt CHANGED
@@ -1,37 +1,44 @@
 
 
 
1
  accelerate
 
 
2
  invisible_watermark
 
3
  # Updated versions 2.4.0+cu124
4
- #torch==2.4.0 --index-url https://download.pytorch.org/whl/cu124/torch-2.4.0%2Bcu124-cp310-cp310-linux_x86_64.whl#sha256=2cb28155635e3d3d0be198e3f3e7457a1d7b99e8c2eedc73fe22fab574d11a4c
5
- #torchvision --index-url https://download.pytorch.org/whl/cu124
6
- #torchaudio --index-url https://download.pytorch.org/whl/cu124
 
7
  #xformers --index-url https://download.pytorch.org/whl/cu124
8
  # ==0.0.27.post2 --index-url https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl#sha256=b3cdeeb9eae4547805ab8c3c645ac2fa9c6da85b46c039d9befa117e9f6f22fe
9
 
10
  #generic Torch versions
 
11
  torch
12
- xformers
 
13
 
14
  # Other dependencies
15
  Haishoku
16
  pybind11>=2.12
17
  huggingface_hub
18
- # git+https://github.com/huggingface/[email protected]#egg=transformers
19
- transformers==4.48.1
20
- gradio[oauth]
21
  Pillow>=11.0.0
22
  numpy
23
  requests
24
- git+https://github.com/huggingface/diffusers
25
- #diffusers[torch]
26
  peft
27
  opencv-python
28
  open3d
29
- protobuf
30
  safetensors
31
  sentencepiece
32
  git+https://github.com/asomoza/image_gen_aux.git
33
  #git+https://github.com/huggingface/optimum.git
34
- #git+https://github.com/triton-lang/triton.git -not windows supported --disable in environment variable
35
  tiktoken
36
  #pilmoji[requests]==2.0.4
37
  #emoji==2.2.0
@@ -41,4 +48,5 @@ pangocffi
41
  pangocairocffi
42
  #tensorflow
43
  cairosvg
44
- python-dotenv
 
 
1
+ git+https://github.com/huggingface/diffusers.git
2
+ #diffusers[torch]==0.32.2
3
+ transformers
4
  accelerate
5
+ safetensors
6
+ sentencepiece
7
  invisible_watermark
8
+
9
  # Updated versions 2.4.0+cu124
10
+ #--extra-index-url https://download.pytorch.org/whl/cu124
11
+ #torch==2.6.0 --index-url https://download.pytorch.org/whl/cu124/torch-2.4.0%2Bcu124-cp310-cp310-linux_x86_64.whl#sha256=2cb28155635e3d3d0be198e3f3e7457a1d7b99e8c2eedc73fe22fab574d11a4c
12
+ #torchvision==0.21.0 --index-url https://download.pytorch.org/whl/cu124/torchvision-0.19.0%2Bcu124-cp310-cp310-linux_x86_64.whl#sha256=82cf10450537aeb9584ceaf53633f177bb809d563c5d64526f4b9be7668b2769
13
+ #torchaudio==2.6.0 --index-url https://download.pytorch.org/whl/cu124
14
  #xformers --index-url https://download.pytorch.org/whl/cu124
15
  # ==0.0.27.post2 --index-url https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl#sha256=b3cdeeb9eae4547805ab8c3c645ac2fa9c6da85b46c039d9befa117e9f6f22fe
16
 
17
  #generic Torch versions
18
+ #--extra-index-url https://download.pytorch.org/whl/cu124
19
  torch
20
+ torchvision
21
+ #xformers #==0.0.29.post3
22
 
23
  # Other dependencies
24
  Haishoku
25
  pybind11>=2.12
26
  huggingface_hub
27
+ # git+https://github.com/huggingface/[email protected]#egg=transformers
28
+ #gradio[oauth]
 
29
  Pillow>=11.0.0
30
  numpy
31
  requests
32
+
 
33
  peft
34
  opencv-python
35
  open3d
36
+ protobuf #==3.20.3
37
  safetensors
38
  sentencepiece
39
  git+https://github.com/asomoza/image_gen_aux.git
40
  #git+https://github.com/huggingface/optimum.git
41
+ #git+https://github.com/triton-lang/triton.git #-not windows supported --disable in environment variable
42
  tiktoken
43
  #pilmoji[requests]==2.0.4
44
  #emoji==2.2.0
 
48
  pangocairocffi
49
  #tensorflow
50
  cairosvg
51
+ python-dotenv
52
+ #git+https://github.com/gradio-app/[email protected]#egg=gradio
src/condition.py CHANGED
@@ -1,4 +1,6 @@
1
- import torch
 
 
2
  from typing import Optional, Union, List, Tuple
3
  from diffusers.pipelines import FluxPipeline
4
  from PIL import Image, ImageFilter
@@ -13,13 +15,13 @@ condition_dict = {
13
  "deblurring": 7,
14
  "fill": 9,
15
  }
16
-
17
  class Condition(object):
18
  def __init__(
19
  self,
20
  condition_type: str,
21
- raw_img: Union[Image.Image, torch.Tensor] = None,
22
- condition: Union[Image.Image, torch.Tensor] = None,
23
  mask=None,
24
  ) -> None:
25
  self.condition_type = condition_type
@@ -31,8 +33,8 @@ class Condition(object):
31
  # TODO: Add mask support
32
  assert mask is None, "Mask not supported yet"
33
  def get_condition(
34
- self, condition_type: str, raw_img: Union[Image.Image, torch.Tensor]
35
- ) -> Union[Image.Image, torch.Tensor]:
36
  """
37
  Returns the condition image.
38
  """
@@ -77,7 +79,7 @@ class Condition(object):
77
  Returns the type id of the condition.
78
  """
79
  return condition_dict[condition_type]
80
- def _encode_image(self, pipe: FluxPipeline, cond_img: Image.Image) -> torch.Tensor:
81
  """
82
  Encodes an image condition into tokens using the pipeline.
83
  """
@@ -96,7 +98,7 @@ class Condition(object):
96
  pipe.dtype,
97
  )
98
  return cond_tokens, cond_ids
99
- def encode(self, pipe: FluxPipeline) -> Tuple[torch.Tensor, torch.Tensor, int]:
100
  """
101
  Encodes the condition into tokens, ids and type_id.
102
  """
@@ -113,5 +115,5 @@ class Condition(object):
113
  raise NotImplementedError(
114
  f"Condition type {self.condition_type} not implemented"
115
  )
116
- type_id = torch.ones_like(ids[:, :1]) * self.type_id
117
  return tokens, ids, type_id
 
1
+ import spaces
2
+ import gradio as gr
3
+ from torch import Tensor, ones_like
4
  from typing import Optional, Union, List, Tuple
5
  from diffusers.pipelines import FluxPipeline
6
  from PIL import Image, ImageFilter
 
15
  "deblurring": 7,
16
  "fill": 9,
17
  }
18
+ @spaces.GPU(progress=gr.Progress(track_tqdm=True))
19
  class Condition(object):
20
  def __init__(
21
  self,
22
  condition_type: str,
23
+ raw_img: Union[Image.Image, Tensor] = None,
24
+ condition: Union[Image.Image,Tensor] = None,
25
  mask=None,
26
  ) -> None:
27
  self.condition_type = condition_type
 
33
  # TODO: Add mask support
34
  assert mask is None, "Mask not supported yet"
35
  def get_condition(
36
+ self, condition_type: str, raw_img: Union[Image.Image, Tensor]
37
+ ) -> Union[Image.Image, Tensor]:
38
  """
39
  Returns the condition image.
40
  """
 
79
  Returns the type id of the condition.
80
  """
81
  return condition_dict[condition_type]
82
+ def _encode_image(self, pipe: FluxPipeline, cond_img: Image.Image) -> Tensor:
83
  """
84
  Encodes an image condition into tokens using the pipeline.
85
  """
 
98
  pipe.dtype,
99
  )
100
  return cond_tokens, cond_ids
101
+ def encode(self, pipe: FluxPipeline) -> Tuple[Tensor, Tensor, int]:
102
  """
103
  Encodes the condition into tokens, ids and type_id.
104
  """
 
115
  raise NotImplementedError(
116
  f"Condition type {self.condition_type} not implemented"
117
  )
118
+ type_id = ones_like(ids[:, :1]) * self.type_id
119
  return tokens, ids, type_id
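
A hypothetical usage sketch for the refactored `Condition` class follows; it assumes an already-loaded `FluxPipeline` and a local `reference.png`, neither of which appears in this commit.

```python
from PIL import Image
from diffusers.pipelines import FluxPipeline
from src.condition import Condition

# Example model id; any Flux checkpoint the pipeline supports would do.
pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev")
raw = Image.open("reference.png").convert("RGB").resize((512, 512))

# "subject" is one of the keys in condition_dict above.
cond = Condition(condition_type="subject", raw_img=raw)
tokens, ids, type_id = cond.encode(pipe)  # tokens/ids feed the transformer; type_id tags the condition
```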
utils/ai_generator.py CHANGED
@@ -2,11 +2,11 @@
2
  import gradio as gr
3
  import os
4
  import time
5
- from turtle import width # Added for implementing delays
6
- import torch
7
  import random
8
  from utils.ai_generator_diffusers_flux import generate_ai_image_local
9
- from pathlib import Path
10
  from huggingface_hub import InferenceClient
11
  import requests
12
  import io
@@ -38,12 +38,14 @@ def generate_ai_image(
38
  width=912,
39
  height=512,
40
  strength=0.5,
41
- seed = random.randint(0, constants.MAX_SEED),
42
  progress=gr.Progress(track_tqdm=True),
43
  *args,
44
  **kwargs
45
- ):
46
- if (torch.cuda.is_available() and torch.cuda.device_count() >= 1):
 
 
47
  print("Local GPU available. Generating image locally.")
48
  if conditioned_image is not None:
49
  pipeline = "FluxImg2ImgPipeline"
 
2
  import gradio as gr
3
  import os
4
  import time
5
+ #from turtle import width # Added for implementing delays
6
+ from torch import cuda
7
  import random
8
  from utils.ai_generator_diffusers_flux import generate_ai_image_local
9
+ #from pathlib import Path
10
  from huggingface_hub import InferenceClient
11
  import requests
12
  import io
 
38
  width=912,
39
  height=512,
40
  strength=0.5,
41
+ seed = 0,
42
  progress=gr.Progress(track_tqdm=True),
43
  *args,
44
  **kwargs
45
+ ):
46
+ if seed == 0:
47
+ seed = random.randint(0, constants.MAX_SEED)
48
+ if (cuda.is_available() and cuda.device_count() >= 1): # Check if a local GPU is available
49
  print("Local GPU available. Generating image locally.")
50
  if conditioned_image is not None:
51
  pipeline = "FluxImg2ImgPipeline"
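
The `seed = 0` default above fixes a classic Python pitfall: a default argument like `seed=random.randint(0, MAX_SEED)` is evaluated once, at function definition time, so every call silently reused the same "random" seed. A self-contained sketch of the difference:

```python
import random

def bad(seed=random.randint(0, 9)):
    # The default was drawn once at import; every call sees the same value.
    return seed

def good(seed=0):
    if seed == 0:  # sentinel: draw a fresh seed on each call
        seed = random.randint(0, 9)
    return seed

print(bad(), bad(), bad())     # same value three times
print(good(), good(), good())  # independently drawn values
```

One trade-off of the sentinel: a caller can no longer request a literal seed of 0, since 0 now means "randomize".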
utils/ai_generator_diffusers_flux.py CHANGED
@@ -1,19 +1,19 @@
1
  # utils/ai_generator_diffusers_flux.py
2
- import spaces
3
- import gradio as gr
4
  import os
5
  import utils.constants as constants
6
- import torch
 
 
7
  from diffusers import FluxPipeline,FluxImg2ImgPipeline,FluxControlPipeline
8
- import accelerate
9
  from transformers import AutoTokenizer
10
  import safetensors
11
- import xformers
12
- from diffusers.utils import load_image
13
- from huggingface_hub import hf_hub_download
14
  from PIL import Image
15
  from tempfile import NamedTemporaryFile
16
- from src.condition import Condition
17
  from utils.image_utils import (
18
  crop_and_resize_image,
19
  )
@@ -21,23 +21,26 @@ from utils.version_info import (
21
  get_torch_info,
22
  get_diffusers_version,
23
  get_transformers_version,
24
- get_xformers_version
 
 
25
  )
 
26
  from utils.lora_details import get_trigger_words, approximate_token_count, split_prompt_precisely
27
- from utils.color_utils import detect_color_format
28
- import utils.misc as misc
29
- from pathlib import Path
30
  import warnings
31
  warnings.filterwarnings("ignore", message=".*Torch was not compiled with flash attention.*")
32
- #print(torch.__version__) # Ensure it's 2.0 or newer
33
- #print(torch.cuda.is_available()) # Ensure CUDA is available
34
 
35
  PIPELINE_CLASSES = {
36
  "FluxPipeline": FluxPipeline,
37
  "FluxImg2ImgPipeline": FluxImg2ImgPipeline,
38
  "FluxControlPipeline": FluxControlPipeline
39
  }
40
- @spaces.GPU(duration=140)
41
  def generate_image_from_text(
42
  text,
43
  model_name="black-forest-labs/FLUX.1-dev",
@@ -51,13 +54,14 @@ def generate_image_from_text(
51
  additional_parameters=None,
52
  progress=gr.Progress(track_tqdm=True)
53
  ):
54
- device = "cuda" if torch.cuda.is_available() else "cpu"
 
55
  print(f"device:{device}\nmodel_name:{model_name}\n")
56
 
57
  # Initialize the pipeline
58
  pipe = FluxPipeline.from_pretrained(
59
  model_name,
60
- torch_dtype=torch.bfloat16 if device == "cuda" else torch.float32
61
  ).to(device)
62
  pipe.enable_model_cpu_offload()
63
 
@@ -88,7 +92,7 @@ def generate_image_from_text(
88
  pipe.load_lora_weights(lora_weight, use_auth_token=constants.HF_API_TOKEN)
89
 
90
  # Set the random seed for reproducibility
91
- generator = torch.Generator(device=device).manual_seed(seed)
92
  conditions = []
93
 
94
  # Handle conditioned image if provided
@@ -122,12 +126,12 @@ def generate_image_from_text(
122
  del conditions
123
  del generator
124
  del pipe
125
- torch.cuda.empty_cache()
126
- torch.cuda.ipc_collect()
127
 
128
  return image
129
 
130
- @spaces.GPU(duration=140)
131
  def generate_image_lowmem(
132
  text,
133
  neg_prompt=None,
@@ -141,50 +145,59 @@ def generate_image_lowmem(
141
  seed=0,
142
  true_cfg_scale=1.0,
143
  pipeline_name="FluxPipeline",
144
- strength=0.75,
145
  additional_parameters=None,
146
  progress=gr.Progress(track_tqdm=True)
147
- ):
148
  # Retrieve the pipeline class from the mapping
149
  pipeline_class = PIPELINE_CLASSES.get(pipeline_name)
150
  if not pipeline_class:
151
  raise ValueError(f"Unsupported pipeline type '{pipeline_name}'. "
152
  f"Available options: {list(PIPELINE_CLASSES.keys())}")
153
 
154
- device = "cuda" if torch.cuda.is_available() else "cpu"
 
 
 
155
  print(f"device:{device}\nmodel_name:{model_name}\nlora_weights:{lora_weights}\n")
156
  print(f"\n {get_torch_info()}\n")
157
  # Disable gradient calculations
158
- with torch.no_grad():
159
  # Initialize the pipeline inside the context manager
160
  pipe = pipeline_class.from_pretrained(
161
  model_name,
162
- torch_dtype=torch.bfloat16 if device == "cuda" else torch.bfloat32
163
  ).to(device)
164
  # Optionally, don't use CPU offload if not necessary
165
- pipe.enable_model_cpu_offload()
166
  # alternative version that may be more efficient
167
  # pipe.enable_sequential_cpu_offload()
 
 
 
 
 
 
168
 
169
  # Access the tokenizer from the pipeline
170
  tokenizer = pipe.tokenizer
171
 
172
  # Check if add_prefix_space is set and convert to slow tokenizer if necessary
173
  if getattr(tokenizer, 'add_prefix_space', False):
174
- tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
175
  # Update the pipeline's tokenizer
176
  pipe.tokenizer = tokenizer
 
177
 
178
- flash_attention_enabled = torch.backends.cuda.flash_sdp_enabled()
179
  if flash_attention_enabled == False:
180
  #Enable xFormers memory-efficient attention (optional)
181
- pipe.enable_xformers_memory_efficient_attention()
182
  print("\nEnabled xFormers memory-efficient attention.\n")
183
  else:
184
  pipe.attn_implementation="flash_attention_2"
185
  print("\nEnabled flash_attention_2.\n")
186
- if pipeline_name == "FluxPipeline":
187
- pipe.enable_vae_tiling()
188
  condition_type = "subject"
189
  # Load LoRA weights
190
  # note: does not yet handle multiple LoRA weights with different names, needs .set_adapters(["depth", "hyper-sd"], adapter_weights=[0.85, 0.125])
@@ -275,7 +288,7 @@ def generate_image_lowmem(
275
  else:
276
  pipe.load_lora_weights(lora_weight, use_auth_token=constants.HF_API_TOKEN)
277
  # Set the random seed for reproducibility
278
- generator = torch.Generator(device=device).manual_seed(seed)
279
  conditions = []
280
  if conditioned_image is not None:
281
  conditioned_image = crop_and_resize_image(conditioned_image, image_width, image_height)
@@ -327,9 +340,10 @@ def generate_image_lowmem(
327
  del generator
328
  # Delete the pipeline and clear cache
329
  del pipe
330
- torch.cuda.empty_cache()
331
- torch.cuda.ipc_collect()
332
- print(torch.cuda.memory_summary(device=None, abbreviated=False))
 
333
  return image
334
 
335
  def generate_ai_image_local (
@@ -348,6 +362,8 @@ def generate_ai_image_local (
348
  strength=0.75,
349
  progress=gr.Progress(track_tqdm=True)
350
  ):
 
 
351
  try:
352
  if map_option != "Prompt":
353
  prompt = constants.PROMPTS[map_option]
@@ -387,6 +403,7 @@ def generate_ai_image_local (
387
  print(f"Seed: {seed}")
388
  print(f"Additional Parameters: {additional_parameters}")
389
  print(f"Conditioned Image: {conditioned_image}")
 
390
  print(f"pipeline: {pipeline_name}")
391
  image = generate_image_lowmem(
392
  text=prompt,
@@ -400,15 +417,18 @@ def generate_ai_image_local (
400
  num_inference_steps=num_inference_steps,
401
  seed=seed,
402
  pipeline_name=pipeline_name,
 
403
  additional_parameters=additional_parameters
404
  )
405
  with NamedTemporaryFile(delete=False, suffix=".png") as tmp:
406
  image.save(tmp.name, format="PNG")
407
  constants.temp_files.append(tmp.name)
408
  print(f"Image saved to {tmp.name}")
 
409
  return tmp.name
410
  except Exception as e:
411
  print(f"Error generating AI image: {e}")
 
412
  return None
413
 
414
  # does not work
@@ -419,7 +439,7 @@ def merge_LoRA_weights(model="black-forest-labs/FLUX.1-dev",
419
  if model_suffix not in lora_weights:
420
  raise ValueError(f"The model suffix '{model_suffix}' must be in the lora_weights string '{lora_weights}' to proceed.")
421
 
422
- pipe = FluxPipeline.from_pretrained(model, torch_dtype=torch.bfloat16)
423
  pipe.load_lora_weights(lora_weights)
424
  pipe.save_lora_weights(os.getenv("TMPDIR"))
425
  lora_name = lora_weights.split("/")[-1] + "-merged"
 
1
  # utils/ai_generator_diffusers_flux.py
 
 
2
  import os
3
  import utils.constants as constants
4
+ #import spaces
5
+ import gradio as gr
6
+ from torch import __version__ as torch_version_, version, cuda, bfloat16, float32, Generator, no_grad, backends
7
  from diffusers import FluxPipeline,FluxImg2ImgPipeline,FluxControlPipeline
8
+ #import accelerate
9
  from transformers import AutoTokenizer
10
  import safetensors
11
+ #import xformers
12
+ #from diffusers.utils import load_image
13
+ #from huggingface_hub import hf_hub_download
14
  from PIL import Image
15
  from tempfile import NamedTemporaryFile
16
+
17
  from utils.image_utils import (
18
  crop_and_resize_image,
19
  )
 
21
  get_torch_info,
22
  get_diffusers_version,
23
  get_transformers_version,
24
+ get_xformers_version,
25
+ initialize_cuda,
26
+ release_torch_resources
27
  )
28
+ import gc
29
  from utils.lora_details import get_trigger_words, approximate_token_count, split_prompt_precisely
30
+ #from utils.color_utils import detect_color_format
31
+ #import utils.misc as misc
32
+ #from pathlib import Path
33
  import warnings
34
  warnings.filterwarnings("ignore", message=".*Torch was not compiled with flash attention.*")
35
+ #print(torch_version_) # Ensure it's 2.0 or newer
36
+ #print(cuda.is_available()) # Ensure CUDA is available
37
 
38
  PIPELINE_CLASSES = {
39
  "FluxPipeline": FluxPipeline,
40
  "FluxImg2ImgPipeline": FluxImg2ImgPipeline,
41
  "FluxControlPipeline": FluxControlPipeline
42
  }
43
+ #@spaces.GPU()
44
  def generate_image_from_text(
45
  text,
46
  model_name="black-forest-labs/FLUX.1-dev",
 
54
  additional_parameters=None,
55
  progress=gr.Progress(track_tqdm=True)
56
  ):
57
+ from src.condition import Condition
58
+ device = "cuda" if cuda.is_available() else "cpu"
59
  print(f"device:{device}\nmodel_name:{model_name}\n")
60
 
61
  # Initialize the pipeline
62
  pipe = FluxPipeline.from_pretrained(
63
  model_name,
64
+ torch_dtype=bfloat16 if device == "cuda" else float32
65
  ).to(device)
66
  pipe.enable_model_cpu_offload()
67
 
 
92
  pipe.load_lora_weights(lora_weight, use_auth_token=constants.HF_API_TOKEN)
93
 
94
  # Set the random seed for reproducibility
95
+ generator = Generator(device=device).manual_seed(seed)
96
  conditions = []
97
 
98
  # Handle conditioned image if provided
 
126
  del conditions
127
  del generator
128
  del pipe
129
+ cuda.empty_cache()
130
+ cuda.ipc_collect()
131
 
132
  return image
133
 
134
+ #@spaces.GPU(progress=gr.Progress(track_tqdm=True))
135
  def generate_image_lowmem(
136
  text,
137
  neg_prompt=None,
 
145
  seed=0,
146
  true_cfg_scale=1.0,
147
  pipeline_name="FluxPipeline",
148
+ strength=0.75,
149
  additional_parameters=None,
150
  progress=gr.Progress(track_tqdm=True)
151
+ ):
152
  # Retrieve the pipeline class from the mapping
153
  pipeline_class = PIPELINE_CLASSES.get(pipeline_name)
154
  if not pipeline_class:
155
  raise ValueError(f"Unsupported pipeline type '{pipeline_name}'. "
156
  f"Available options: {list(PIPELINE_CLASSES.keys())}")
157
 
158
+ initialize_cuda()
159
+ device = "cuda" if cuda.is_available() else "cpu"
160
+ from src.condition import Condition
161
+
162
  print(f"device:{device}\nmodel_name:{model_name}\nlora_weights:{lora_weights}\n")
163
  print(f"\n {get_torch_info()}\n")
164
  # Disable gradient calculations
165
+ with no_grad():
166
  # Initialize the pipeline inside the context manager
167
  pipe = pipeline_class.from_pretrained(
168
  model_name,
169
+ torch_dtype=bfloat16 if device == "cuda" else float32
170
  ).to(device)
171
  # Optionally, don't use CPU offload if not necessary
172
+
173
  # alternative version that may be more efficient
174
  # pipe.enable_sequential_cpu_offload()
175
+ if pipeline_name == "FluxPipeline":
176
+ pipe.enable_model_cpu_offload()
177
+ pipe.vae.enable_slicing()
178
+ pipe.vae.enable_tiling()
179
+ else:
180
+ pipe.enable_model_cpu_offload()
181
 
182
  # Access the tokenizer from the pipeline
183
  tokenizer = pipe.tokenizer
184
 
185
  # Check if add_prefix_space is set and convert to slow tokenizer if necessary
186
  if getattr(tokenizer, 'add_prefix_space', False):
187
+ tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)  # tokenizers are CPU objects; device_map is not a tokenizer argument
188
  # Update the pipeline's tokenizer
189
  pipe.tokenizer = tokenizer
190
+ pipe.to(device)
191
 
192
+ flash_attention_enabled = backends.cuda.flash_sdp_enabled()
193
  if flash_attention_enabled == False:
194
  #Enable xFormers memory-efficient attention (optional)
195
+ #pipe.enable_xformers_memory_efficient_attention()
196
  print("\nEnabled xFormers memory-efficient attention.\n")
197
  else:
198
  pipe.attn_implementation="flash_attention_2"
199
  print("\nEnabled flash_attention_2.\n")
200
+
 
201
  condition_type = "subject"
202
  # Load LoRA weights
203
  # note: does not yet handle multiple LoRA weights with different names, needs .set_adapters(["depth", "hyper-sd"], adapter_weights=[0.85, 0.125])
 
288
  else:
289
  pipe.load_lora_weights(lora_weight, use_auth_token=constants.HF_API_TOKEN)
290
  # Set the random seed for reproducibility
291
+ generator = Generator(device=device).manual_seed(seed)
292
  conditions = []
293
  if conditioned_image is not None:
294
  conditioned_image = crop_and_resize_image(conditioned_image, image_width, image_height)
 
340
  del generator
341
  # Delete the pipeline and clear cache
342
  del pipe
343
+ cuda.empty_cache()
344
+ cuda.ipc_collect()
345
+ print(cuda.memory_summary(device=None, abbreviated=False))
346
+
347
  return image
348
 
349
  def generate_ai_image_local (
 
362
  strength=0.75,
363
  progress=gr.Progress(track_tqdm=True)
364
  ):
365
+ release_torch_resources()
366
+ print("Generating image with lowmem")
367
  try:
368
  if map_option != "Prompt":
369
  prompt = constants.PROMPTS[map_option]
 
403
  print(f"Seed: {seed}")
404
  print(f"Additional Parameters: {additional_parameters}")
405
  print(f"Conditioned Image: {conditioned_image}")
406
+ print(f"Conditioned Image Strength: {strength}")
407
  print(f"pipeline: {pipeline_name}")
408
  image = generate_image_lowmem(
409
  text=prompt,
 
417
  num_inference_steps=num_inference_steps,
418
  seed=seed,
419
  pipeline_name=pipeline_name,
420
+ strength=strength,
421
  additional_parameters=additional_parameters
422
  )
423
  with NamedTemporaryFile(delete=False, suffix=".png") as tmp:
424
  image.save(tmp.name, format="PNG")
425
  constants.temp_files.append(tmp.name)
426
  print(f"Image saved to {tmp.name}")
427
+ gc.collect()
428
  return tmp.name
429
  except Exception as e:
430
  print(f"Error generating AI image: {e}")
431
+ gc.collect()
432
  return None
433
 
434
  # does not work
 
439
  if model_suffix not in lora_weights:
440
  raise ValueError(f"The model suffix '{model_suffix}' must be in the lora_weights string '{lora_weights}' to proceed.")
441
 
442
+ pipe = FluxPipeline.from_pretrained(model, torch_dtype=bfloat16)
443
  pipe.load_lora_weights(lora_weights)
444
  pipe.save_lora_weights(os.getenv("TMPDIR"))
445
  lora_name = lora_weights.split("/")[-1] + "-merged"
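
Condensed, the rewritten module follows a load → generate → teardown lifecycle: torch names are imported lazily, generation runs under `no_grad()`, and CUDA memory is released after each image. A minimal sketch of that shape, with illustrative names rather than the commit's exact code:

```python
import gc
from torch import Generator, bfloat16, cuda, no_grad
from diffusers import FluxPipeline

def generate_once(prompt: str, seed: int = 0):
    device = "cuda" if cuda.is_available() else "cpu"
    with no_grad():
        pipe = FluxPipeline.from_pretrained(
            "black-forest-labs/FLUX.1-dev",
            torch_dtype=bfloat16 if device == "cuda" else None,
        ).to(device)
        pipe.enable_model_cpu_offload()  # trade speed for VRAM headroom
        generator = Generator(device=device).manual_seed(seed)
        image = pipe(prompt, generator=generator).images[0]
    # Teardown: drop references first, then return cached CUDA memory.
    del generator, pipe
    if cuda.is_available():
        cuda.empty_cache()
        cuda.ipc_collect()
    gc.collect()
    return image
```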
utils/constants.py CHANGED
@@ -10,12 +10,18 @@ import numpy as np
10
  os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:256,expandable_segments:True"
11
  os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
12
  #os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2'
 
13
  os.environ['USE_FLASH_ATTENTION'] = '1'
14
- #os.environ['XFORMERS_FORCE_DISABLE_TRITON']= '1'
15
  #os.environ['XFORMERS_FORCE_DISABLE_TORCHSCRIPT']= '1'
16
- os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
17
  os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = "1"
18
  os.environ["CUDA_VISIBLE_DEVICES"] = "0"
 
 
 
 
 
19
 
20
  IS_SHARED_SPACE = "Surn/HexaGrid" in os.environ.get('SPACE_ID', '')
21
 
@@ -36,14 +42,14 @@ default_lut_example_img = "./LUT/daisy.jpg"
36
  MAX_SEED = np.iinfo(np.int32).max
37
 
38
  PROMPTS = {
39
- "BorderBlack": "eight_color (tabletop_map built from small hexagon pieces) as ((empty black on all sides), barren alien_world_map), with light_blue_is_rivers and brown_is_mountains and red_is_volcano and [white_is_snow at the top and bottom of map] as (four_color background: light_blue, green, tan, brown), horizontal_gradient is (brown to tan to green to light_blue to blue) and vertical_gradient is (white to blue to (green, tan and red) to blue to white), (middle is dark, no_reflections, no_shadows), ((partial hexes on edges and sides are black))",
40
- "Earth": "eight_color (tabletop_map built from small hexagon pieces) as world_map, with light_blue_is_rivers and brown_is_mountains and red_is_volcano and [white_is_snow at the top and bottom of map] as (four_color background: light_blue, green, tan, brown), horizontal_gradient is (brown to tan to green to light_blue to blue) and vertical_gradient is (white to blue to (green, tan and red) to blue to white), (middle is dark, no_reflections, no_shadows), ((partial hexes on edges and sides are black)), overhead_view, from_above",
41
- "Map3": "top-down, bird's-eye view of A mystic forest-themed map viewed from directly above, showcasing detailed terrain features such as lakes, dense forests, magical flora, and hexagonal grids. The map is designed for tabletop gaming, emphasizing clarity and strategic elements, eight_color (tabletop_map built from small hexagon pieces) as alien_forest_world_map, with light_blue_is_lakes and green_is_forests and purple_is_alien_flora and [white_is_snow at the top and bottom of map] as (four_color background: light_blue, green, purple, brown), horizontal_gradient is (green to purple to light_blue to blue) and vertical_gradient is (white to blue to (green, purple and brown) to blue to white), (middle is dark, no_reflections, no_shadows), ((partial hexes on edges and sides are black))",
42
- "Map4": "A top-down, bird's-eye view of a medieval battlefield map for tabletop games, eight_color (tabletop_map built from small hexagon pieces) as mystic_forest_world_map, with teal_is_lakes and dark_green_is_forest and violet_is_magic_fauna and [white_is_snow at the top and bottom of map] as (four_color background: teal, dark_green, violet, brown), horizontal_gradient is (brown to violet to teal to blue) and vertical_gradient is (white to blue to (dark_green, violet and brown) to blue to white), (middle is enchanted, no_reflections, no_shadows), ((partial hexes on edges and sides are black)), viewed from above, flat and illustrative, not a map of the Earth",
43
- "Alien Landscape": "top-down view of a barren alien world map made from hexagon pieces. Features light blue rivers, brown mountains, red volcanoes, and white snow at the top and bottom. Background colors: light blue, green, tan, brown. Partial hexes on edges are black.",
44
- "Alien World": "top-down view of an alien_world map built from hexagon pieces. Includes rivers, mountains, volcanoes, and snowy areas. Uses a four-color background: light blue, green, tan, brown. Partial edge hexes are black. Overhead view.",
45
- "Mystic Forest": "top-down view of a mystic forest map with lakes, dense forests, magical flora, and hexagonal grids. Designed for clarity in tabletop gaming. Background colors: light blue, green, purple, brown. Partial hexes on edges are black.",
46
- "Medieval Battlefield": "top-down view of a medieval battlefield map featuring lakes, forests, and magic fauna. Emphasizes clarity and strategy for tabletop games. Background colors: teal, dark green, violet, brown. Partial edge hexes are black. Viewed from above.",
47
  "Prompt": None # Indicates that the prompt should be taken from prompt_textbox
48
  }
49
 
@@ -152,7 +158,8 @@ LORA_WEIGHTS = [
152
  "Cossale/Frames2-Flex.1",
153
  "XLabs-AI/flux-lora-collection/anime_lora.safetensors",
154
  "XLabs-AI/flux-lora-collection/scenery_lora.safetensors",
155
- "XLabs-AI/flux-lora-collection/disney_lora.safetensors"
 
156
  ]
157
 
158
  # Map each LoRA weight to its corresponding model
@@ -164,7 +171,8 @@ LORA_TO_MODEL = {
164
  "AlekseyCalvin/HSTcolorFlexAlpha": "ostris/Flex.1-alpha",
165
  "XLabs-AI/flux-lora-collection/anime_lora.safetensors":"black-forest-labs/FLUX.1-dev",
166
  "XLabs-AI/flux-lora-collection/scenery_lora.safetensors":"black-forest-labs/FLUX.1-dev",
167
- "XLabs-AI/flux-lora-collection/disney_lora.safetensors":"black-forest-labs/FLUX.1-dev"
 
168
  }
169
  condition_type = ["depth", "canny", "subject", "coloring", "deblurring", "fill", "redux"]
170
  # Detailed LoRA weight configurations
@@ -236,6 +244,18 @@ LORA_DETAILS = {
236
  }
237
  }
238
  ],
239
  "Cossale/Frames2-Flex.1": [
240
  {
241
  "weight_name": "backdrops_v2.safetensors",
 
10
  os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:256,expandable_segments:True"
11
  os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
12
  #os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2'
13
+ os.environ['CUDA_MODULE_LOADING']='LAZY'
14
  os.environ['USE_FLASH_ATTENTION'] = '1'
15
+ os.environ['XFORMERS_FORCE_DISABLE_TRITON']= '1'
16
  #os.environ['XFORMERS_FORCE_DISABLE_TORCHSCRIPT']= '1'
17
+ #os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
18
  os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = "1"
19
  os.environ["CUDA_VISIBLE_DEVICES"] = "0"
20
+ os.environ["NVIDIA_VISIBLE_DEVICES"] = "0"
21
+ os.environ["ZEROGPU_VERSION"] = "2"
22
+ os.environ["ZEROGPU_V2"] = "true"
23
+ os.environ["ZERO_GPU_V2"] = "true"
24
+ os.environ["ZERO_GPU_PATCH_TORCH_DEVICE"]='1'
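
Note that the allocator and visibility variables in this block only take effect if they are set before torch initializes CUDA, which is why constants.py assigns them at module import time, ahead of any torch import. A minimal sketch of the ordering requirement:

```python
import os

# Must be set before the first torch/CUDA initialization to have any effect.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:256,expandable_segments:True"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
os.environ["CUDA_MODULE_LOADING"] = "LAZY"

import torch  # noqa: E402 - deliberately imported after the env setup

print(torch.cuda.is_available())  # reflects the visibility configured above
```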
25
 
26
  IS_SHARED_SPACE = "Surn/HexaGrid" in os.environ.get('SPACE_ID', '')
27
 
 
42
  MAX_SEED = np.iinfo(np.int32).max
43
 
44
  PROMPTS = {
45
+ "BorderBlack": "Top-down view of a hexagon-based alien map with black borders. Features rivers, mountains, volcanoes, and snow at top and bottom. Colors: light blue, green, tan, brown. No reflections or shadows. Partial hexes on edges are black.",
46
+ "Earth": "Top-down view of a hexagonal world map with rivers, mountains, volcanoes, and snow at top and bottom. Colors: light blue, green, tan, brown. No reflections or shadows. Partial edge hexes are black. Overhead view.",
47
+ "Map3": "Top-down view of a mystic forest map with lakes, dense forests, magical flora, and hex grids. Designed for tabletop gaming with clarity and strategic elements. Colors: light blue, green, purple, brown. Partial hexes on edges are black.",
48
+ "Map4": "Top-down view of a medieval battlefield map with lakes, forests, magical fauna, and hex grids. Emphasizes clarity and strategy for tabletop games. Colors: teal, dark green, violet, brown. Partial edge hexes are black. Viewed from above.",
49
+ "Alien Landscape": "Top-down view of a barren alien world map made from hexagon pieces. Features light blue rivers, brown mountains, red volcanoes, and white snow at top and bottom. Colors: light blue, green, tan, brown. Partial hexes on edges are black.",
50
+ "Alien World": "Top-down view of an alien world map built from hexagon pieces. Includes rivers, mountains, volcanoes, and snowy areas. Colors: light blue, green, tan, brown. Partial edge hexes are black. Overhead view.",
51
+ "Mystic Forest": "Top-down view of a mystic forest map with lakes, dense forests, magical flora, and hex grids. Designed for clarity in tabletop gaming. Colors: light blue, green, purple, brown. Partial hexes on edges are black.",
52
+ "Medieval Battlefield": "Top-down view of a medieval battlefield map featuring lakes, forests, and magic fauna. Emphasizes clarity and strategy for tabletop games. Colors: teal, dark green, violet, brown. Partial edge hexes are black. Viewed from above.",
53
  "Prompt": None # Indicates that the prompt should be taken from prompt_textbox
54
  }
55
 
 
158
  "Cossale/Frames2-Flex.1",
159
  "XLabs-AI/flux-lora-collection/anime_lora.safetensors",
160
  "XLabs-AI/flux-lora-collection/scenery_lora.safetensors",
161
+ "XLabs-AI/flux-lora-collection/disney_lora.safetensors",
162
+ "XLabs-AI/flux-RealismLora"
163
  ]
164
 
165
  # Map each LoRA weight to its corresponding model
 
171
  "AlekseyCalvin/HSTcolorFlexAlpha": "ostris/Flex.1-alpha",
172
  "XLabs-AI/flux-lora-collection/anime_lora.safetensors":"black-forest-labs/FLUX.1-dev",
173
  "XLabs-AI/flux-lora-collection/scenery_lora.safetensors":"black-forest-labs/FLUX.1-dev",
174
+ "XLabs-AI/flux-lora-collection/disney_lora.safetensors":"black-forest-labs/FLUX.1-dev",
175
+ "XLabs-AI/flux-RealismLora":"black-forest-labs/FLUX.1-dev"
176
  }
177
  condition_type = ["depth", "canny", "subject", "coloring", "deblurring", "fill", "redux"]
178
  # Detailed LoRA weight configurations
 
244
  }
245
  }
246
  ],
247
+ "XLabs-AI/flux-RealismLora":[
248
+ {
249
+ "notes": "No trigger words but 8k, Animatrix illustration style, fantasy style, natural photo cinematic should all work @6min"
250
+ },
251
+ {
252
+ "parameters" :{
253
+ "guidance_scale": "3.2",
254
+ "num_inference_steps": "34",
255
+ "scale": "0.85"
256
+ }
257
+ }
258
+ ],
259
  "Cossale/Frames2-Flex.1": [
260
  {
261
  "weight_name": "backdrops_v2.safetensors",
utils/misc.py CHANGED
@@ -1,6 +1,9 @@
1
  # misc.py file contains miscellaneous utility functions
2
  import math
3
  import sys
 
 
 
4
 
5
  def pause():
6
  """
@@ -67,3 +70,78 @@ def convert_ratio_to_dimensions(ratio, height=512, rotate90=False):
67
  adjusted_width, adjusted_height = adjusted_height, adjusted_width
68
  return adjusted_width, adjusted_height
69
 
1
  # misc.py file contains miscellaneous utility functions
2
  import math
3
  import sys
4
+ import logging
5
+ import os
6
+ import subprocess
7
 
8
  def pause():
9
  """
 
70
  adjusted_width, adjusted_height = adjusted_height, adjusted_width
71
  return adjusted_width, adjusted_height
72
 
73
+ def install_torch():
74
+ print("\nInstalling PyTorch with CUDA support...")
75
+ # Define the package and index URL
76
+ package = "torch==2.4.0"
77
+ index_url = "https://download.pytorch.org/whl/cu124"
78
+ # Construct the pip install command
79
+ command = [
80
+ "pip", "install", "--force-reinstall",
81
+ f"{package}", "--index-url", f"{index_url}"
82
+ ]
83
+ # Run the command using subprocess
84
+ subprocess.run(command, check=True)
85
+ print("\nPyTorch installation completed.")
86
+ print("\nInstalling torchvision...")
87
+ package = "torchvision==0.19.0"
88
+ index_url = "https://download.pytorch.org/whl/cu124"
89
+ # Construct the pip install command
90
+ command = [
91
+ "pip", "install", "--force-reinstall",
92
+ f"{package}", "--index-url", f"{index_url}"
93
+ ]
94
+ # Run the command using subprocess
95
+ subprocess.run(command, check=True)
96
+ print("\nPlease restart the kernel to use the newly installed PyTorch version.")
97
+
98
+ def _get_output(cmd):
99
+ try:
100
+ return subprocess.check_output(cmd).decode("utf-8")
101
+ except Exception as ex:
102
+ logging.exception(ex)
103
+ return None
104
+
105
+ def install_cuda_toolkit():
106
+ #CUDA_TOOLKIT_URL = "https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run"
107
+ # CUDA_TOOLKIT_URL = "https://developer.download.nvidia.com/compute/cuda/12.2.0/local_installers/cuda_12.2.0_535.54.03_linux.run"
108
+ CUDA_TOOLKIT_URL = "https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda_12.4.1_550.54.15_linux.run"
109
+ CUDA_TOOLKIT_FILE = "/tmp/%s" % os.path.basename(CUDA_TOOLKIT_URL)
110
+ print("\nDownloading CUDA Toolkit from %s" % CUDA_TOOLKIT_URL)
111
+ subprocess.call(["wget", "-q", CUDA_TOOLKIT_URL, "-O", CUDA_TOOLKIT_FILE])
112
+ subprocess.call(["chmod", "+x", CUDA_TOOLKIT_FILE])
113
+ subprocess.call([CUDA_TOOLKIT_FILE, "--silent", "--toolkit"])
114
+ os.environ["CUDA_HOME"] = "/usr/local/cuda"
115
+ os.environ["PATH"] = "%s/bin:%s" % (os.environ["CUDA_HOME"], os.environ["PATH"])
116
+ os.environ["LD_LIBRARY_PATH"] = "%s/lib:%s" % (
117
+ os.environ["CUDA_HOME"],
118
+ "" if "LD_LIBRARY_PATH" not in os.environ else os.environ["LD_LIBRARY_PATH"],
119
+ )
120
+ # Fix: arch_list[-1] += '+PTX'; IndexError: list index out of range
121
+ os.environ["TORCH_CUDA_ARCH_LIST"] = "8.0;8.6"
122
+ print("\nPlease restart the kernel to use the newly installed CUDA Toolkit.")
123
+
124
+ def setup_runtime_env():
125
+ from torch import cuda
126
+ logging.info("Python Version: %s" % _get_output(["python", "--version"]))
127
+ logging.info("CUDA Version: %s" % _get_output(["nvcc", "--version"]))
128
+ logging.info("GCC Version: %s" % _get_output(["gcc", "--version"]))
129
+ logging.info("CUDA is available: %s" % cuda.is_available())
130
+ logging.info("CUDA Device Capability: %s" % (cuda.get_device_capability(),))
131
+
132
+ # Install Pre-compiled CUDA extensions (Fallback to this solution on 12/31/24)
133
+ # Ref: https://huggingface.co/spaces/zero-gpu-explorers/README/discussions/110
134
+ ##ext_dir = os.path.join(os.path.dirname(__file__), "wheels")
135
+ ##for e in os.listdir(ext_dir):
136
+ ## logging.info("Installing Extensions from %s" % e)
137
+ ## subprocess.call(
138
+ ## ["pip", "install", os.path.join(ext_dir, e)], stderr=subprocess.STDOUT
139
+ ## )
140
+ # Compile CUDA extensions
141
+ # Update on 12/31/24: No module named 'torch'. But it is installed and listed by `pip list`
142
+ # ext_dir = os.path.join(os.path.dirname(__file__), "citydreamer", "extensions")
143
+ # for e in os.listdir(ext_dir):
144
+ # if os.path.isdir(os.path.join(ext_dir, e)):
145
+ # subprocess.call(["pip", "install", "."], cwd=os.path.join(ext_dir, e))
146
+
147
+ #logging.info("Installed Python Packages: %s" % _get_output(["pip", "list"]))
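
The `_get_output` helper doubles as a capability probe: a `None` return means the command is missing or failed. A short usage sketch matching how the `__main__` block in app.py drives `install_cuda_toolkit`:

```python
from utils.misc import _get_output, install_cuda_toolkit

nvcc = _get_output(["nvcc", "--version"])
if nvcc is None:
    # nvcc not found: download and silently install the CUDA 12.4.1 toolkit.
    install_cuda_toolkit()
else:
    print(f"nvcc already present:\n{nvcc}")
```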
utils/version_info.py CHANGED
@@ -2,9 +2,8 @@
2
 
3
  import subprocess
4
  import os
5
- import spaces
6
- import torch
7
  import sys
 
8
  import gradio as gr
9
 
10
  git = os.environ.get('GIT', "git")
@@ -46,14 +45,46 @@ def get_diffusers_version():
46
  return diffusers.__version__
47
  except Exception:
48
  return "<none>"
49
- #@spaces.GPU()
50
  def get_torch_info():
 
 
51
  try:
52
- return [torch.__version__, f"CUDA Version:{torch.version.cuda}", f"Available:{torch.cuda.is_available()}", f"flash attention enabled: {torch.backends.cuda.flash_sdp_enabled()}", f"Capabilities: {torch.cuda.get_device_capability(0)}", f"Device Name: {torch.cuda.get_device_name(0)}", f"Device Count: {torch.cuda.device_count()}"]
 
 
53
  except Exception:
 
54
  return "<none>"
55
 
56
  def versions_html():
 
57
  python_version = ".".join([str(x) for x in sys.version_info[0:3]])
58
  commit = commit_hash()
59
 
@@ -64,12 +95,12 @@ def versions_html():
64
  </a>
65
  '''
66
 
67
- return f"""
68
  version: <a href="https://huggingface.co/spaces/Surn/HexaGrid/commit/{"huggingface" if commit == "<none>" else commit}" target="_blank">{"huggingface" if commit == "<none>" else commit}</a>
69
  &#x2000;•&#x2000;
70
  python: <span title="{sys.version}">{python_version}</span>
71
  &#x2000;•&#x2000;
72
- torch: {getattr(torch, '__long_version__',torch.__version__)}
73
  &#x2000;•&#x2000;
74
  diffusers: {get_diffusers_version()}
75
  &#x2000;•&#x2000;
@@ -82,4 +113,6 @@ def versions_html():
82
  {toggle_dark_link}
83
  <br>
84
  Full GPU Info:{get_torch_info()}
85
- """
 
 
 
2
 
3
  import subprocess
4
  import os
 
 
5
  import sys
6
+ import gc
7
  import gradio as gr
8
 
9
  git = os.environ.get('GIT', "git")
 
45
  return diffusers.__version__
46
  except Exception:
47
  return "<none>"
48
+
49
  def get_torch_info():
50
+ from torch import __version__ as torch_version_, version, cuda, backends
51
+ initialize_cuda()
52
  try:
53
+ info = [torch_version_, f"CUDA Version:{version.cuda}", f"Available:{cuda.is_available()}", f"flash attention enabled: {backends.cuda.flash_sdp_enabled()}", f"Capabilities: {cuda.get_device_capability(0)}", f"Device Name: {cuda.get_device_name(0)}", f"Device Count: {cuda.device_count()}",f"Devices: {os.environ['CUDA_VISIBLE_DEVICES']}", f"Zero :{os.environ['CUDA_MODULE_LOADING']}"]
54
+ del torch_version_, version, cuda, backends
55
+ return info
56
  except Exception:
57
+ del torch_version_, version, cuda, backends
58
  return "<none>"
59
 
60
+ def release_torch_resources():
61
+ from torch import cuda
62
+ # Clear the CUDA cache
63
+ cuda.empty_cache()
64
+ cuda.ipc_collect()
65
+ # Delete any objects that are using GPU memory
66
+ #for obj in gc.get_objects():
67
+ # if is_tensor(obj) or (hasattr(obj, 'data') and is_tensor(obj.data)):
68
+ # del obj
69
+ # Run garbage collection
70
+ del cuda
71
+ gc.collect()
72
+
73
+
74
+ def initialize_cuda():
75
+ from torch import cuda, version
76
+ if cuda.is_available():
78
+ print(f"CUDA is available. Using device: {cuda.get_device_name(0)} with CUDA version: {version.cuda}")
79
+ result = "cuda"
80
+ else:
82
+ print("CUDA is not available. Using CPU.")
83
+ result = "cpu"
84
+ return result
85
+
86
  def versions_html():
87
+ from torch import __version__ as torch_version_
88
  python_version = ".".join([str(x) for x in sys.version_info[0:3]])
89
  commit = commit_hash()
90
 
 
95
  </a>
96
  '''
97
 
98
+ v_html = f"""
99
  version: <a href="https://huggingface.co/spaces/Surn/HexaGrid/commit/{"huggingface" if commit == "<none>" else commit}" target="_blank">{"huggingface" if commit == "<none>" else commit}</a>
100
  &#x2000;•&#x2000;
101
  python: <span title="{sys.version}">{python_version}</span>
102
  &#x2000;•&#x2000;
103
+ torch: {torch_version_}
104
  &#x2000;•&#x2000;
105
  diffusers: {get_diffusers_version()}
106
  &#x2000;•&#x2000;
 
113
  {toggle_dark_link}
114
  <br>
115
  Full GPU Info:{get_torch_info()}
116
+ """
117
+ del torch_version_
118
+ return v_html
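
Taken together, the new helpers are meant to bracket a generation: `release_torch_resources()` clears anything a previous run left cached, `initialize_cuda()` logs and returns the device, and a final release hands VRAM back, mirroring their use in `generate_ai_image_local` and `generate_image_lowmem`. A usage sketch:

```python
from utils.version_info import initialize_cuda, release_torch_resources

release_torch_resources()       # free anything a previous run left cached
device = initialize_cuda()      # "cuda" or "cpu", with a log line for the GPU
try:
    ...                         # run the pipeline on `device`
finally:
    release_torch_resources()   # empty_cache + ipc_collect + gc.collect
```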