openfree committed on
Commit e3c44d1
1 Parent(s): 6141d0e

Update app.py

Files changed (1):
  1. app.py +59 -45
app.py CHANGED
@@ -202,19 +202,23 @@ def remove_lora_2(selected_indices, loras_state):
     return selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2
 
 def randomize_loras(selected_indices, loras_state):
-    if len(loras_state) < 2:
-        raise gr.Error("Not enough LoRAs to randomize.")
-    selected_indices = random.sample(range(len(loras_state)), 2)
-    lora1 = loras_state[selected_indices[0]]
-    lora2 = loras_state[selected_indices[1]]
-    selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}](https://huggingface.co/{lora1['repo']}) ✨"
-    selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}](https://huggingface.co/{lora2['repo']}) ✨"
-    lora_scale_1 = 1.15
-    lora_scale_2 = 1.15
-    lora_image_1 = lora1['image']
-    lora_image_2 = lora2['image']
-    random_prompt = random.choice(prompt_values)
-    return selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2, random_prompt
+    try:
+        if len(loras_state) < 2:
+            raise gr.Error("Not enough LoRAs to randomize.")
+        selected_indices = random.sample(range(len(loras_state)), 2)
+        lora1 = loras_state[selected_indices[0]]
+        lora2 = loras_state[selected_indices[1]]
+        selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}](https://huggingface.co/{lora1['repo']}) ✨"
+        selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}](https://huggingface.co/{lora2['repo']}) ✨"
+        lora_scale_1 = 1.15
+        lora_scale_2 = 1.15
+        lora_image_1 = lora1['image']
+        lora_image_2 = lora2['image']
+        random_prompt = random.choice(prompt_values)
+        return selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2, random_prompt
+    except Exception as e:
+        print(f"Error in randomize_loras: {str(e)}")
+        return "Error", "Error", [], 1.15, 1.15, None, None, ""
 
 def add_custom_lora(custom_lora, selected_indices, current_loras):
     if custom_lora:
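Note on the randomize_loras change: gr.Error is itself a subclass of Exception, so the new blanket `except Exception` also swallows the "Not enough LoRAs to randomize." error and returns the fallback tuple instead of surfacing a Gradio error popup. A minimal sketch of an alternative that re-raises gr.Error (assuming the same prompt_values list and LoRA dict structure used elsewhere in app.py):

```python
import random
import gradio as gr

def randomize_loras(selected_indices, loras_state):
    try:
        if len(loras_state) < 2:
            raise gr.Error("Not enough LoRAs to randomize.")
        selected_indices = random.sample(range(len(loras_state)), 2)
        lora1 = loras_state[selected_indices[0]]
        lora2 = loras_state[selected_indices[1]]
        return (
            f"### LoRA 1 Selected: [{lora1['title']}](https://huggingface.co/{lora1['repo']}) ✨",
            f"### LoRA 2 Selected: [{lora2['title']}](https://huggingface.co/{lora2['repo']}) ✨",
            selected_indices, 1.15, 1.15,
            lora1['image'], lora2['image'],
            random.choice(prompt_values),  # prompt_values is assumed to be defined elsewhere in app.py
        )
    except gr.Error:
        # Re-raise so Gradio still shows the message as an error popup in the UI.
        raise
    except Exception as e:
        # Unexpected failures fall back to safe placeholder outputs.
        print(f"Error in randomize_loras: {e}")
        return "Error", "Error", [], 1.15, 1.15, None, None, ""
```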
@@ -566,42 +570,52 @@ def infer_upscale(
     controlnet_conditioning_scale,
     progress=gr.Progress(track_tqdm=True),
 ):
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-    true_input_image = input_image
-    input_image, w_original, h_original, was_resized = process_input(
-        input_image, upscale_factor
-    )
-
-    # rescale with upscale factor
-    w, h = input_image.size
-    control_image = input_image.resize((w * upscale_factor, h * upscale_factor))
-
-    generator = torch.Generator().manual_seed(seed)
-
-    gr.Info("Upscaling image...")
-    image = pipe_upscale(
-        prompt="",
-        control_image=control_image,
-        controlnet_conditioning_scale=controlnet_conditioning_scale,
-        num_inference_steps=num_inference_steps,
-        guidance_scale=3.5,
-        height=control_image.size[1],
-        width=control_image.size[0],
-        generator=generator,
-    ).images[0]
-
-    if was_resized:
-        gr.Info(
-            f"Resizing output image to targeted {w_original * upscale_factor}x{h_original * upscale_factor} size."
-        )
-
-    # resize to target desired size
-    image = image.resize((w_original * upscale_factor, h_original * upscale_factor))
-    image.save("output.jpg")
-    # convert to numpy
-    return [true_input_image, image, seed]
+    try:
+        if input_image is None:
+            raise ValueError("No input image provided")
+
+        if randomize_seed:
+            seed = random.randint(0, MAX_SEED)
+        true_input_image = input_image
+        input_image, w_original, h_original, was_resized = process_input(
+            input_image, upscale_factor
+        )
+
+        # rescale with upscale factor
+        w, h = input_image.size
+        control_image = input_image.resize((w * upscale_factor, h * upscale_factor))
+
+        generator = torch.Generator(device=device).manual_seed(seed)
+
+        gr.Info("Upscaling image...")
+        # move all tensors to the same device
+        pipe_upscale.to(device)
+        control_image = control_image.to(device)
+
+        image = pipe_upscale(
+            prompt="",
+            control_image=control_image,
+            controlnet_conditioning_scale=controlnet_conditioning_scale,
+            num_inference_steps=num_inference_steps,
+            guidance_scale=3.5,
+            height=control_image.size[1],
+            width=control_image.size[0],
+            generator=generator,
+        ).images[0]
+
+        if was_resized:
+            gr.Info(
+                f"Resizing output image to targeted {w_original * upscale_factor}x{h_original * upscale_factor} size."
+            )
+
+        # resize to target desired size
+        image = image.resize((w_original * upscale_factor, h_original * upscale_factor))
+        image.save("output.jpg")
+        # convert to numpy
+        return [true_input_image, image, seed]
+    except Exception as e:
+        print(f"Error in infer_upscale: {str(e)}")
+        return [None, None, seed]
 
 
 with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css, delete_cache=(60, 3600)) as app:
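Note on the infer_upscale change: control_image is a PIL.Image returned by resize(), and PIL images do not implement .to(), so control_image.to(device) raises AttributeError; the new broad except then returns [None, None, seed] instead of an upscaled image. A hedged sketch of the device handling, under the assumption that pipe_upscale is a diffusers ControlNet-style pipeline (which accepts PIL control images directly), moving only the pipeline and the generator to the target device:

```python
import torch
from PIL import Image

def upscale_with_controlnet(pipe, input_image: Image.Image, upscale_factor: int,
                            num_inference_steps: int, conditioning_scale: float,
                            seed: int, device: str = "cuda") -> Image.Image:
    """Hypothetical helper: keep the control image as a PIL.Image and move only
    the pipeline and RNG to the target device."""
    w, h = input_image.size
    control_image = input_image.resize((w * upscale_factor, h * upscale_factor))

    pipe.to(device)  # the pipeline owns the model tensors; PIL images have no .to()
    generator = torch.Generator(device=device).manual_seed(seed)

    return pipe(
        prompt="",
        control_image=control_image,
        controlnet_conditioning_scale=conditioning_scale,
        num_inference_steps=num_inference_steps,
        guidance_scale=3.5,
        height=control_image.size[1],
        width=control_image.size[0],
        generator=generator,
    ).images[0]
```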
 