openfree committed on
Commit
2b5b4f4
1 Parent(s): 86c1f58

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -24
app.py CHANGED
@@ -23,9 +23,11 @@ import warnings
23
 
24
  huggingface_token = os.getenv("HUGGINFACE_TOKEN")
25
 
26
- # 번역 모델 로드
27
- translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en", device=0 if torch.cuda.is_available() else -1)
28
 
 
 
 
 
29
  #Load prompts for randomization
30
  df = pd.read_csv('prompts.csv', header=None)
31
  prompt_values = df.values.flatten()
@@ -353,8 +355,11 @@ def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps
353
  ).images[0]
354
  return final_image
355
 
 
356
  def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state, progress=gr.Progress(track_tqdm=True)):
357
- # 한글 감지 및 번역
 
 
358
  if any('\u3131' <= char <= '\u318E' or '\uAC00' <= char <= '\uD7A3' for char in prompt):
359
  translated = translator(prompt, max_length=512)[0]['translation_text']
360
  print(f"Original prompt: {prompt}")
@@ -418,25 +423,28 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
418
  if randomize_seed:
419
  seed = random.randint(0, MAX_SEED)
420
 
421
- # Generate image
422
- if image_input is not None:
423
- final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, seed)
424
- yield final_image, seed, gr.update(visible=False)
425
- else:
426
- image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
427
- # Consume the generator to get the final image
428
- final_image = None
429
- step_counter = 0
430
- for image in image_generator:
431
- step_counter += 1
432
- final_image = image
433
- progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
434
- yield image, seed, gr.update(value=progress_bar, visible=True)
435
-
436
- if final_image is None:
437
- raise gr.Error("Failed to generate image")
438
-
439
- yield final_image, seed, gr.update(value=progress_bar, visible=False)
 
 
 
440
 
441
  run_lora.zerogpu = True
442
 
@@ -741,16 +749,18 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css, delete_cache=(60, 3600)) as a
741
  inputs=[selected_indices, loras_state],
742
  outputs=[loras_state, gallery, selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
743
  )
 
744
  gr.on(
745
  triggers=[generate_button.click, prompt.submit],
746
  fn=run_lora,
747
  inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state],
748
  outputs=[result, seed, progress_bar]
749
- ).then( # Update the history gallery
750
- fn=lambda x, history: update_history(x, history),
751
  inputs=[result, history_gallery],
752
  outputs=history_gallery,
753
  )
 
754
 
755
  # 업스케일 버튼 이벤트 추가
756
  upscale_button.click(
 
23
 
24
  huggingface_token = os.getenv("HUGGINFACE_TOKEN")
25
 
 
 
26
 
27
+ translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en", device="cpu")
28
+
29
+
30
+
31
  #Load prompts for randomization
32
  df = pd.read_csv('prompts.csv', header=None)
33
  prompt_values = df.values.flatten()
 
355
  ).images[0]
356
  return final_image
357
 
358
+ # run_lora 함수 수정
359
  def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state, progress=gr.Progress(track_tqdm=True)):
360
+ try:
361
+
362
+
363
  if any('\u3131' <= char <= '\u318E' or '\uAC00' <= char <= '\uD7A3' for char in prompt):
364
  translated = translator(prompt, max_length=512)[0]['translation_text']
365
  print(f"Original prompt: {prompt}")
 
423
  if randomize_seed:
424
  seed = random.randint(0, MAX_SEED)
425
 
426
+ if image_input is not None:
427
+ final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, seed)
428
+ return final_image, seed, gr.update(visible=False)
429
+ else:
430
+ image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
431
+ final_image = None
432
+ step_counter = 0
433
+ for image in image_generator:
434
+ step_counter += 1
435
+ final_image = image
436
+ progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
437
+ yield image, seed, gr.update(value=progress_bar, visible=True)
438
+
439
+ if final_image is None:
440
+ raise gr.Error("Failed to generate image")
441
+
442
+ return final_image, seed, gr.update(value=progress_bar, visible=False)
443
+ except Exception as e:
444
+ print(f"Error in run_lora: {str(e)}")
445
+ return None, seed, gr.update(visible=False)
446
+
447
+
448
 
449
  run_lora.zerogpu = True
450
 
 
749
  inputs=[selected_indices, loras_state],
750
  outputs=[loras_state, gallery, selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
751
  )
752
+ # 이벤트 핸들러 수정
753
  gr.on(
754
  triggers=[generate_button.click, prompt.submit],
755
  fn=run_lora,
756
  inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state],
757
  outputs=[result, seed, progress_bar]
758
+ ).then(
759
+ fn=lambda x, history: update_history(x, history) if x is not None else history,
760
  inputs=[result, history_gallery],
761
  outputs=history_gallery,
762
  )
763
+
764
 
765
  # 업스케일 버튼 이벤트 추가
766
  upscale_button.click(