openfree committed
Commit c410887
1 Parent(s): 97ca72b

Update app.py

Files changed (1):
    app.py  +188 -207

app.py CHANGED
@@ -4,19 +4,10 @@ import json
 import logging
 import torch
 from PIL import Image
-from diffusers import (
-    DiffusionPipeline,
-    AutoencoderTiny,
-    AutoencoderKL,
-    AutoPipelineForImage2Image,
-    FluxControlNetModel,
-    FluxControlNetPipeline,
-)
-from live_preview_helpers import (
-    calculate_shift,
-    retrieve_timesteps,
-    flux_pipe_call_that_returns_an_iterable_of_images,
-)
+import spaces
+from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL, AutoPipelineForImage2Image, FluxControlNetModel
+from diffusers.pipelines import FluxControlNetPipeline
+from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
 from diffusers.utils import load_image
 from huggingface_hub import hf_hub_download, HfFileSystem, ModelCard, snapshot_download
 import copy
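The new `import spaces` is the Hugging Face Spaces ZeroGPU helper that the decorators added later in this commit rely on. A minimal sketch of the pattern (illustrative only; `heavy_inference` is a made-up name, and the package is only functional on Spaces hardware):

```python
import spaces  # Hugging Face Spaces ZeroGPU helper

@spaces.GPU(duration=75)  # a GPU is attached only while the call runs (duration in seconds)
def heavy_inference(prompt: str) -> str:
    # Hypothetical stand-in for the decorated pipeline calls below.
    return f"generated for: {prompt}"
```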
@@ -25,13 +16,14 @@ import time
 import requests
 import pandas as pd
 from transformers import pipeline
-import warnings
 from gradio_imageslider import ImageSlider
+import numpy as np
+import warnings
 
 # Load the translation model
 translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
 
-# Load prompts for randomization
+#Load prompts for randomization
 df = pd.read_csv('prompts.csv', header=None)
 prompt_values = df.values.flatten()
 
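The Helsinki-NLP checkpoint loaded above translates Korean prompts to English before they reach FLUX. A usage sketch (the Hangul guard is an illustrative assumption, not code from this commit):

```python
from transformers import pipeline

translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")

def translate_if_korean(prompt: str) -> str:
    # Translate only when the prompt contains Hangul syllables.
    if any('\uac00' <= ch <= '\ud7a3' for ch in prompt):
        return translator(prompt)[0]['translation_text']
    return prompt
```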
@@ -44,6 +36,16 @@ dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 base_model = "black-forest-labs/FLUX.1-dev"
 
+huggingface_token = os.getenv("HUGGINFACE_TOKEN")
+
+model_path = snapshot_download(
+    repo_id="black-forest-labs/FLUX.1-dev",
+    repo_type="model",
+    ignore_patterns=["*.md", "*..gitattributes"],
+    local_dir="FLUX.1-dev",
+    token=huggingface_token, # type a new token-id.
+)
+
 taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
 good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
 pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)
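Two details in the added download block look like slips: `os.getenv("HUGGINFACE_TOKEN")` (likely `HUGGINGFACE_TOKEN`) and the double-dot pattern `"*..gitattributes"` (likely `"*.gitattributes"`). If the env var is unset, `snapshot_download` receives `token=None` and the gated FLUX.1-dev download fails. A defensive sketch with the assumed corrections:

```python
import os
from huggingface_hub import snapshot_download

# Assumed spellings; the commit itself reads "HUGGINFACE_TOKEN" and "*..gitattributes".
huggingface_token = os.getenv("HUGGINGFACE_TOKEN") or os.getenv("HUGGINFACE_TOKEN")
if huggingface_token is None:
    raise RuntimeError("A Hugging Face token is required: FLUX.1-dev is a gated repo.")

model_path = snapshot_download(
    repo_id="black-forest-labs/FLUX.1-dev",
    repo_type="model",
    ignore_patterns=["*.md", "*.gitattributes"],
    local_dir="FLUX.1-dev",
    token=huggingface_token,
)
```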
@@ -56,23 +58,16 @@ pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
     text_encoder_2=pipe.text_encoder_2,
     tokenizer_2=pipe.tokenizer_2,
     torch_dtype=dtype
-).to(device)
+)
 
-# Load the ControlNet model for upscaling
+# Load controlnet for upscaling
 controlnet = FluxControlNetModel.from_pretrained(
-    "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=dtype
+    "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
 ).to(device)
-
-pipe_controlnet = FluxControlNetPipeline(
-    vae=pipe.vae,
-    text_encoder=pipe.text_encoder,
-    tokenizer=pipe.tokenizer,
-    text_encoder_2=pipe.text_encoder_2,
-    tokenizer_2=pipe.tokenizer_2,
-    transformer=pipe.transformer,  # FLUX uses a transformer in place of a UNet
-    controlnet=controlnet,
-    scheduler=pipe.scheduler
-).to(device)  # 'torch_dtype' removed
+pipe_upscale = FluxControlNetPipeline.from_pretrained(
+    model_path, controlnet=controlnet, torch_dtype=torch.bfloat16
+)
+pipe_upscale.to(device)
 
 MAX_SEED = 2**32 - 1
 MAX_PIXEL_BUDGET = 1024 * 1024
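`FluxControlNetPipeline.from_pretrained(model_path, ...)` loads a second full copy of the FLUX weights from the local snapshot. The construction this commit removes shared the already-loaded components instead; for comparison, a sketch of that alternative (same objects as above; the removed code passed `pipe.vae`, i.e. the tiny preview VAE):

```python
# Component-sharing alternative (mirrors the deleted code): lower memory use,
# but both pipelines are tied to the same modules.
pipe_upscale_shared = FluxControlNetPipeline(
    vae=pipe.vae,                  # good_vae may be preferable for final quality
    text_encoder=pipe.text_encoder,
    tokenizer=pipe.tokenizer,
    text_encoder_2=pipe.text_encoder_2,
    tokenizer_2=pipe.tokenizer_2,
    transformer=pipe.transformer,  # FLUX uses a transformer where SD uses a UNet
    controlnet=controlnet,
    scheduler=pipe.scheduler,
).to(device)
```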
@@ -98,23 +93,23 @@ class calculateDuration:
 def download_file(url, directory=None):
     if directory is None:
         directory = os.getcwd()  # Use current working directory if not specified
-
+
     # Get the filename from the URL
     filename = url.split('/')[-1]
-
+
     # Full path for the downloaded file
     filepath = os.path.join(directory, filename)
-
+
     # Download the file
     response = requests.get(url)
     response.raise_for_status()  # Raise an exception for bad status codes
-
+
     # Write the content to the file
     with open(filepath, 'wb') as file:
         file.write(response.content)
-
+
     return filepath
-
+
 def update_selection(evt: gr.SelectData, selected_indices, loras_state, width, height):
     selected_index = evt.index
     selected_indices = selected_indices or []
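The hunk header references `class calculateDuration`, which sits outside this diff. For orientation, a minimal sketch consistent with its usage elsewhere in the file (`with calculateDuration("Generating image"):`); the real implementation may differ:

```python
import time

class calculateDuration:
    # Assumed implementation: a simple timing context manager.
    def __init__(self, activity_name=""):
        self.activity_name = activity_name

    def __enter__(self):
        self.start_time = time.time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        elapsed = time.time() - self.start_time
        print(f"Elapsed time for {self.activity_name}: {elapsed:.2f} seconds")
```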
@@ -222,7 +217,7 @@ def add_custom_lora(custom_lora, selected_indices, current_loras):
     print(f"New LoRA: {new_item}")
     existing_item_index = len(current_loras)
     current_loras.append(new_item)
-
+
     # Update gallery
     gallery_items = [(item["image"], item["title"]) for item in current_loras]
     # Update selected_indices if there's room
@@ -303,10 +298,11 @@ def remove_custom_lora(selected_indices, current_loras):
         lora_image_2
     )
 
+@spaces.GPU(duration=75)
 def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress):
     print("Generating image...")
-    pipe.to(device)
-    generator = torch.Generator(device=device).manual_seed(seed)
+    pipe.to("cuda")
+    generator = torch.Generator(device="cuda").manual_seed(seed)
     with calculateDuration("Generating image"):
         # Generate image
         for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
@@ -322,9 +318,10 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
     ):
         yield img
 
+@spaces.GPU(duration=75)
 def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, seed):
-    pipe_i2i.to(device)
-    generator = torch.Generator(device=device).manual_seed(seed)
+    pipe_i2i.to("cuda")
+    generator = torch.Generator(device="cuda").manual_seed(seed)
     image_input = load_image(image_input_path)
     final_image = pipe_i2i(
         prompt=prompt_mash,
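Both generation paths are now wrapped in `@spaces.GPU(duration=75)` and move their pipeline to CUDA inside the call, as ZeroGPU requires. `generate_image` is a generator that streams preview frames; a simplified sketch of how a caller such as `run_lora` consumes it (the `progress=None` argument is illustrative):

```python
# Keep the most recent frame: intermediate yields are live previews,
# the final yield is the finished image.
final_image = None
for image in generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress=None):
    final_image = image
```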
@@ -370,7 +367,7 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
     with calculateDuration("Unloading LoRA"):
         pipe.unload_lora_weights()
         pipe_i2i.unload_lora_weights()
-
+
     print(pipe.get_active_adapters())
     # Load LoRA weights with respective scales
     lora_names = []
@@ -408,7 +405,7 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
     # Generate image
     if image_input is not None:
         final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, seed)
-        return final_image, seed, gr.update(visible=False)
+        yield final_image, seed, gr.update(visible=False)
     else:
         image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
         # Consume the generator to get the final image
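The `return` → `yield` change here is a real fix, not style: `run_lora` yields elsewhere, so it is a generator function, and `return value` inside a generator only raises `StopIteration` — the value never reaches Gradio. A minimal demonstration:

```python
def broken():
    if True:
        return "result"  # StopIteration("result"): nothing is emitted
    yield "preview"

def fixed():
    if True:
        yield "result"   # the consumer actually receives the value
        return
    yield "preview"

print(list(broken()))  # []
print(list(fixed()))   # ['result']
```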
@@ -425,7 +422,7 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
 
     yield final_image, seed, gr.update(value=progress_bar, visible=False)
 
-# run_lora.zerogpu = True  # removed due to a decorator issue
+run_lora.zerogpu = True
 
 def get_huggingface_safetensors(link):
     split_link = link.split("/")
@@ -483,6 +480,31 @@ def update_history(new_image, history):
     history.insert(0, new_image)
     return history
 
+css = '''
+#gen_btn{height: 100%}
+#title{text-align: center}
+#title h1{font-size: 3em; display:inline-flex; align-items:center}
+#title img{width: 100px; margin-right: 0.25em}
+#gallery .grid-wrap{height: 5vh}
+#lora_list{background: var(--block-background-fill);padding: 0 1em .3em; font-size: 90%}
+.custom_lora_card{margin-bottom: 1em}
+.card_internal{display: flex;height: 100px;margin-top: .5em}
+.card_internal img{margin-right: 1em}
+.styler{--form-gap-width: 0px !important}
+#progress{height:30px}
+#progress .generating{display:none}
+.progress-container {width: 100%;height: 30px;background-color: #f0f0f0;border-radius: 15px;overflow: hidden;margin-bottom: 20px}
+.progress-bar {height: 100%;background-color: #4f46e5;width: calc(var(--current) / var(--total) * 100%);transition: width 0.5s ease-in-out}
+#component-8, .button_total{height: 100%; align-self: stretch;}
+#loaded_loras [data-testid="block-info"]{font-size:80%}
+#custom_lora_structure{background: var(--block-background-fill)}
+#custom_lora_btn{margin-top: auto;margin-bottom: 11px}
+#random_btn{font-size: 300%}
+#component-11{align-self: stretch;}
+footer {visibility: hidden;}
+'''
+
+# Upscale-related functions
 def process_input(input_image, upscale_factor, **kwargs):
     w, h = input_image.size
     w_original, h_original = w, h
@@ -494,8 +516,7 @@ def process_input(input_image, upscale_factor, **kwargs):
         warnings.warn(
             f"Requested output image is too large ({w * upscale_factor}x{h * upscale_factor}). Resizing to ({int(aspect_ratio * MAX_PIXEL_BUDGET ** 0.5 // upscale_factor), int(MAX_PIXEL_BUDGET ** 0.5 // aspect_ratio // upscale_factor)}) pixels."
         )
-        # Gradio does not have gr.Info, using print instead
-        print(
+        gr.Info(
             f"Requested output image is too large ({w * upscale_factor}x{h * upscale_factor}). Resizing input to ({int(aspect_ratio * MAX_PIXEL_BUDGET ** 0.5 // upscale_factor), int(MAX_PIXEL_BUDGET ** 0.5 // aspect_ratio // upscale_factor)}) pixels budget."
         )
         input_image = input_image.resize(
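A worked example of the pixel-budget clamp in the two messages above, with `MAX_PIXEL_BUDGET = 1024 * 1024`:

```python
w, h, upscale_factor = 1024, 768, 4          # requested output would be 4096x3072
aspect_ratio = w / h
MAX_PIXEL_BUDGET = 1024 * 1024
new_w = int(aspect_ratio * MAX_PIXEL_BUDGET ** 0.5 // upscale_factor)   # 341
new_h = int(MAX_PIXEL_BUDGET ** 0.5 // aspect_ratio // upscale_factor)  # 192
print(new_w * upscale_factor, new_h * upscale_factor)  # 1364 768, ~1.05 MP: within budget
```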
@@ -513,6 +534,7 @@ def process_input(input_image, upscale_factor, **kwargs):
 
     return input_image.resize((w, h)), w_original, h_original, was_resized
 
+@spaces.GPU
 def infer_upscale(
     seed,
     randomize_seed,
@@ -535,9 +557,8 @@
 
     generator = torch.Generator().manual_seed(seed)
 
-    # Gradio does not have gr.Info, using print instead
-    print("Upscaling image...")
-    image = pipe_controlnet(
+    gr.Info("Upscaling image...")
+    image = pipe_upscale(
         prompt="",
         control_image=control_image,
         controlnet_conditioning_scale=controlnet_conditioning_scale,
@@ -549,64 +570,41 @@
     ).images[0]
 
     if was_resized:
-        print(
+        gr.Info(
             f"Resizing output image to targeted {w_original * upscale_factor}x{h_original * upscale_factor} size."
         )
 
     # resize to target desired size
     image = image.resize((w_original * upscale_factor, h_original * upscale_factor))
     image.save("output.jpg")
-    # convert to PIL Image
-    return [true_input_image, image]
+    # convert to numpy
+    return [true_input_image, image, seed]
+
 
-css = '''
-#gen_btn{height: 100%}
-#title{text-align: center}
-#title h1{font-size: 3em; display:inline-flex; align-items:center}
-#title img{width: 100px; margin-right: 0.25em}
-#gallery .grid-wrap{height: 5vh}
-#lora_list{background: var(--block-background-fill);padding: 0 1em .3em; font-size: 90%}
-.custom_lora_card{margin-bottom: 1em}
-.card_internal{display: flex;height: 100px;margin-top: .5em}
-.card_internal img{margin-right: 1em}
-.styler{--form-gap-width: 0px !important}
-#progress{height:30px}
-#progress .generating{display:none}
-.progress-container {width: 100%;height: 30px;background-color: #f0f0f0;border-radius: 15px;overflow: hidden;margin-bottom: 20px}
-.progress-bar {height: 100%;background-color: #4f46e5;width: calc(var(--current) / var(--total) * 100%);transition: width 0.5s ease-in-out}
-#component-8, .button_total{height: 100%; align-self: stretch;}
-#loaded_loras [data-testid="block-info"]{font-size:80%}
-#custom_lora_structure{background: var(--block-background-fill)}
-#custom_lora_btn{margin-top: auto;margin-bottom: 11px}
-#random_btn{font-size: 300%}
-#component-11{align-self: stretch;}
-footer {visibility: hidden;}
-'''
 
 with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css, delete_cache=(60, 3600)) as app:
 
     loras_state = gr.State(loras)
     selected_indices = gr.State([])
-
-    with gr.Tab("Generate"):
-        with gr.Row():
-            with gr.Column(scale=3):
-                prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Type a prompt after selecting a LoRA")
-            with gr.Column(scale=1):
-                generate_button = gr.Button("Generate", variant="primary", elem_classes=["button_total"])
-        with gr.Row(elem_id="loaded_loras"):
-            with gr.Column(scale=1, min_width=25):
-                randomize_button = gr.Button("🎲", variant="secondary", scale=1, elem_id="random_btn")
-            with gr.Column(scale=8):
-                with gr.Row():
-                    with gr.Column(scale=0, min_width=50):
-                        lora_image_1 = gr.Image(label="LoRA 1 Image", interactive=False, min_width=50, width=50, show_label=False, show_share_button=False, show_download_button=False, show_fullscreen_button=False, height=50)
-                    with gr.Column(scale=3, min_width=100):
-                        selected_info_1 = gr.Markdown("Select a LoRA 1")
-                    with gr.Column(scale=5, min_width=50):
-                        lora_scale_1 = gr.Slider(label="LoRA 1 Scale", minimum=0, maximum=3, step=0.01, value=1.15)
-                with gr.Row():
-                    remove_button_1 = gr.Button("Remove", size="sm")
+    with gr.Row():
+        with gr.Column(scale=3):
+            prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Type a prompt after selecting a LoRA")
+        with gr.Column(scale=1):
+            generate_button = gr.Button("Generate", variant="primary", elem_classes=["button_total"])
+
+    with gr.Row(elem_id="loaded_loras"):
+        with gr.Column(scale=1, min_width=25):
+            randomize_button = gr.Button("🎲", variant="secondary", scale=1, elem_id="random_btn")
+        with gr.Column(scale=8):
+            with gr.Row():
+                with gr.Column(scale=0, min_width=50):
+                    lora_image_1 = gr.Image(label="LoRA 1 Image", interactive=False, min_width=50, width=50, show_label=False, show_share_button=False, show_download_button=False, show_fullscreen_button=False, height=50)
+                with gr.Column(scale=3, min_width=100):
+                    selected_info_1 = gr.Markdown("Select a LoRA 1")
+                with gr.Column(scale=5, min_width=50):
+                    lora_scale_1 = gr.Slider(label="LoRA 1 Scale", minimum=0, maximum=3, step=0.01, value=1.15)
+            with gr.Row():
+                remove_button_1 = gr.Button("Remove", size="sm")
         with gr.Column(scale=8):
             with gr.Row():
                 with gr.Column(scale=0, min_width=50):
@@ -617,102 +615,56 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css, delete_cache=(60, 3600)) as a
                 lora_scale_2 = gr.Slider(label="LoRA 2 Scale", minimum=0, maximum=3, step=0.01, value=1.15)
             with gr.Row():
                 remove_button_2 = gr.Button("Remove", size="sm")
-        with gr.Row():
-            with gr.Column():
-                with gr.Group():
-                    with gr.Row(elem_id="custom_lora_structure"):
-                        custom_lora = gr.Textbox(label="Custom LoRA", info="LoRA Hugging Face path or *.safetensors public URL", placeholder="ginipick/flux-lora-eric-cat", scale=3, min_width=150)
-                        add_custom_lora_button = gr.Button("Add Custom LoRA", elem_id="custom_lora_btn", scale=2, min_width=150)
-                    remove_custom_lora_button = gr.Button("Remove Custom LoRA", visible=False)
-                gr.Markdown("[Check the list of FLUX LoRAs](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)", elem_id="lora_list")
-                gallery = gr.Gallery(
-                    [(item["image"], item["title"]) for item in loras],
-                    label="Or pick from the LoRA Explorer gallery",
-                    allow_preview=False,
-                    columns=4,
-                    elem_id="gallery"
-                )
-            with gr.Column():
-                progress_bar = gr.Markdown(elem_id="progress", visible=False)
-                result = gr.Image(label="Generated Image", interactive=False)
-                with gr.Accordion("History", open=False):
-                    history_gallery = gr.Gallery(label="History", columns=6, object_fit="contain", interactive=False)
-
-        with gr.Row():
-            with gr.Accordion("Advanced Settings", open=False):
-                with gr.Row():
-                    input_image = gr.Image(label="Input image", type="filepath")
-                    image_strength = gr.Slider(label="Denoise Strength", info="Lower means more image influence", minimum=0.1, maximum=1.0, step=0.01, value=0.75)
-                with gr.Column():
-                    with gr.Row():
-                        cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
-                        steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=28)
-
-                    with gr.Row():
-                        width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
-                        height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
-
-                    with gr.Row():
-                        randomize_seed = gr.Checkbox(True, label="Randomize seed")
-                        seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
-
-        # Set up event handlers
-        generate_button.click(
-            fn=run_lora,
-            inputs=[prompt, image_input, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state],
-            outputs=[result, seed, progress_bar]
-        ).then(  # Update the history gallery
-            fn=lambda x, history: update_history(x, history),
-            inputs=[result, history_gallery],
-            outputs=history_gallery,
-        )
-        prompt.submit(
-            fn=run_lora,
-            inputs=[prompt, image_input, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state],
-            outputs=[result, seed, progress_bar]
-        ).then(  # Update the history gallery
-            fn=lambda x, history: update_history(x, history),
-            inputs=[result, history_gallery],
-            outputs=history_gallery,
-        )
-        gallery.select(
-            fn=update_selection,
-            inputs=[selected_indices, loras_state, width, height],
-            outputs=[prompt, selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, width, height, lora_image_1, lora_image_2]
-        )
-        remove_button_1.click(
-            fn=remove_lora_1,
-            inputs=[selected_indices, loras_state],
-            outputs=[selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
-        )
-        remove_button_2.click(
-            fn=remove_lora_2,
-            inputs=[selected_indices, loras_state],
-            outputs=[selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
-        )
-        randomize_button.click(
-            fn=randomize_loras,
-            inputs=[selected_indices, loras_state],
-            outputs=[selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2, prompt]
-        )
-        add_custom_lora_button.click(
-            fn=add_custom_lora,
-            inputs=[custom_lora, selected_indices, loras_state],
-            outputs=[loras_state, gallery, selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
-        )
-        remove_custom_lora_button.click(
-            fn=remove_custom_lora,
-            inputs=[selected_indices, loras_state],
-            outputs=[loras_state, gallery, selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
-        )
 
-    with gr.Tab("Upscale"):
-        with gr.Row():
-            input_image_upscale = gr.Image(label="Input Image", type="pil")
-            result_upscale = ImageSlider(label="Input / Output", type="pil", interactive=True)
-        with gr.Row():
-            num_inference_steps_upscale = gr.Slider(
-                label="Number of Inference Steps",
+    with gr.Row():
+        with gr.Column():
+            with gr.Group():
+                with gr.Row(elem_id="custom_lora_structure"):
+                    custom_lora = gr.Textbox(label="Custom LoRA", info="LoRA Hugging Face path or *.safetensors public URL", placeholder="ginipick/flux-lora-eric-cat", scale=3, min_width=150)
+                    add_custom_lora_button = gr.Button("Add Custom LoRA", elem_id="custom_lora_btn", scale=2, min_width=150)
+                remove_custom_lora_button = gr.Button("Remove Custom LoRA", visible=False)
+            gr.Markdown("[Check the list of FLUX LoRAs](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)", elem_id="lora_list")
+            gallery = gr.Gallery(
+                [(item["image"], item["title"]) for item in loras],
+                label="Or pick from the LoRA Explorer gallery",
+                allow_preview=False,
+                columns=4,
+                elem_id="gallery"
+            )
+        with gr.Column():
+            progress_bar = gr.Markdown(elem_id="progress", visible=False)
+            result = gr.Image(label="Generated Image", interactive=False)
+            with gr.Accordion("History", open=False):
+                history_gallery = gr.Gallery(label="History", columns=6, object_fit="contain", interactive=False)
+
+
+    with gr.Row():
+        with gr.Accordion("Advanced Settings", open=False):
+            with gr.Row():
+                input_image = gr.Image(label="Input image", type="filepath")
+                image_strength = gr.Slider(label="Denoise Strength", info="Lower means more image influence", minimum=0.1, maximum=1.0, step=0.01, value=0.75)
+            with gr.Column():
+                with gr.Row():
+                    cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
+                    steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=28)
+
+                with gr.Row():
+                    width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
+                    height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
+
+                with gr.Row():
+                    randomize_seed = gr.Checkbox(True, label="Randomize seed")
+                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
+
+    # Upscale-related UI
+    with gr.Row():
+        upscale_button = gr.Button("Upscale")
+
+    with gr.Row():
+        with gr.Column(scale=4):
+            upscale_input = gr.Image(label="Input Image for Upscaling", type="pil")
+        with gr.Column(scale=1):
+            upscale_steps = gr.Slider(
+                label="Number of Inference Steps for Upscaling",
                 minimum=8,
                 maximum=50,
                 step=1,
@@ -732,34 +684,63 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css, delete_cache=(60, 3600)) as a
                 step=0.1,
                 value=0.6,
             )
-            seed_upscale = gr.Slider(
-                label="Seed",
+            upscale_seed = gr.Slider(
+                label="Seed for Upscaling",
                 minimum=0,
                 maximum=MAX_SEED,
                 step=1,
                 value=42,
             )
-            randomize_seed_upscale = gr.Checkbox(label="Randomize seed", value=True)
-        with gr.Row():
-            upscale_button = gr.Button("Upscale", variant="primary")
-
-        # Upscale button event handler
-        upscale_button.click(
-            fn=infer_upscale,
-            inputs=[
-                seed_upscale,
-                randomize_seed_upscale,
-                input_image_upscale,
-                num_inference_steps_upscale,
-                upscale_factor,
-                controlnet_conditioning_scale,
-            ],
-            outputs=result_upscale,
-        )
+            upscale_randomize_seed = gr.Checkbox(label="Randomize seed for Upscaling", value=True)
 
+    with gr.Row():
+        upscale_result = ImageSlider(label="Input / Output for Upscaling", type="pil", interactive=True)
+
+
+    gallery.select(
+        update_selection,
+        inputs=[selected_indices, loras_state, width, height],
+        outputs=[prompt, selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, width, height, lora_image_1, lora_image_2])
+    remove_button_1.click(
+        remove_lora_1,
+        inputs=[selected_indices, loras_state],
+        outputs=[selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
+    )
+    remove_button_2.click(
+        remove_lora_2,
+        inputs=[selected_indices, loras_state],
+        outputs=[selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
+    )
+    randomize_button.click(
+        randomize_loras,
+        inputs=[selected_indices, loras_state],
+        outputs=[selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2, prompt]
+    )
+    add_custom_lora_button.click(
+        add_custom_lora,
+        inputs=[custom_lora, selected_indices, loras_state],
+        outputs=[loras_state, gallery, selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
+    )
+    remove_custom_lora_button.click(
+        remove_custom_lora,
+        inputs=[selected_indices, loras_state],
+        outputs=[loras_state, gallery, selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
+    )
+
+    gr.on(
+        [upscale_button.click],
+        fn=infer_upscale,
+        inputs=[
+            upscale_seed,
+            upscale_randomize_seed,
+            upscale_input,
+            upscale_steps,
+            upscale_factor,
+            controlnet_conditioning_scale,
+        ],
+        outputs=upscale_result,
+    )
 
-app.queue()
-app.launch()
+app.queue()
+app.launch()
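One wiring detail worth flagging: `infer_upscale` now returns three values (`[true_input_image, image, seed]`), while `gr.on` routes everything to the single `upscale_result` ImageSlider, which displays an (input, output) image pair. If the seed should be echoed back to its slider, a second output slot is needed; a hedged sketch, assuming `infer_upscale` is adjusted to `return (true_input_image, image), seed`:

```python
gr.on(
    [upscale_button.click],
    fn=infer_upscale,
    inputs=[upscale_seed, upscale_randomize_seed, upscale_input,
            upscale_steps, upscale_factor, controlnet_conditioning_scale],
    outputs=[upscale_result, upscale_seed],  # image pair to the slider, seed echoed back
)
```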