openfree committed
Commit c49fe58
1 Parent(s): a3102eb

Update app.py

Files changed (1)
  1. app.py +10 -12
app.py CHANGED
@@ -43,12 +43,11 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 
 # Load the shared FLUX model
 base_model = "black-forest-labs/FLUX.1-dev"
-pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, low_cpu_mem_usage=True)
-pipe.to(device)
+pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to(device)
 
 # Settings for LoRA
-taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype, low_cpu_mem_usage=True)
-good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype, low_cpu_mem_usage=True)
+taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
+good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
 
 # Image-to-Image pipeline setup
 pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
@@ -59,14 +58,13 @@ pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
     tokenizer=pipe.tokenizer,
     text_encoder_2=pipe.text_encoder_2,
     tokenizer_2=pipe.tokenizer_2,
-    torch_dtype=dtype,
-    low_cpu_mem_usage=True
-)
+    torch_dtype=dtype
+).to(device)
 
 # ControlNet setup for upscaling
 controlnet = FluxControlNetModel.from_pretrained(
-    "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16, low_cpu_mem_usage=True
-)
+    "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
+).to(device)
 
 # Upscale pipeline setup (reuses the existing pipe)
 pipe_upscale = FluxControlNetPipeline(
@@ -78,7 +76,7 @@ pipe_upscale = FluxControlNetPipeline(
     transformer=pipe.transformer,
     scheduler=pipe.scheduler,
     controlnet=controlnet
-)
+).to(device)
 
 MAX_SEED = 2**32 - 1
 MAX_PIXEL_BUDGET = 1024 * 1024
@@ -587,7 +585,7 @@ def infer_upscale(
 
         image = pipe_upscale(
             prompt="",
-            image=control_image,
+            control_image=control_image,
             controlnet_conditioning_scale=controlnet_conditioning_scale,
             num_inference_steps=num_inference_steps,
             guidance_scale=3.5,
@@ -605,7 +603,7 @@ def infer_upscale(
         return image, seed
     except Exception as e:
         print(f"Error in infer_upscale: {str(e)}")
-        return gr.Error(f"Upscaling failed: {str(e)}"), seed
+        return None, seed
 
 def check_upscale_input(input_image, *args):
     if input_image is None:
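
For reference, here is a minimal standalone sketch (not part of the commit) of the pattern the patched app.py follows: pipelines are chained through `.to(device)` at load time, the FLUX submodules of `pipe` are shared with the ControlNet upscale pipeline, and the upscaler is called with `control_image=` rather than `image=`. The input image path, conditioning scale, step count, and resize factor below are illustrative assumptions, not values taken from the app.

```python
import torch
from diffusers import DiffusionPipeline, FluxControlNetModel, FluxControlNetPipeline
from diffusers.utils import load_image

dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the base FLUX pipeline once and move it to the target device.
base_model = "black-forest-labs/FLUX.1-dev"
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to(device)

# Load only the upscaler ControlNet; the heavy FLUX weights are reused below.
controlnet = FluxControlNetModel.from_pretrained(
    "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
).to(device)

# Build the upscale pipeline from the already-loaded components of `pipe`.
pipe_upscale = FluxControlNetPipeline(
    vae=pipe.vae,
    text_encoder=pipe.text_encoder,
    text_encoder_2=pipe.text_encoder_2,
    tokenizer=pipe.tokenizer,
    tokenizer_2=pipe.tokenizer_2,
    transformer=pipe.transformer,
    scheduler=pipe.scheduler,
    controlnet=controlnet,
).to(device)

# Hypothetical input; the app derives `control_image` from the user's upload.
control_image = load_image("input.png").resize((1024, 1024))

upscaled = pipe_upscale(
    prompt="",
    control_image=control_image,  # keyword fixed by this commit (was `image=`)
    controlnet_conditioning_scale=0.6,
    num_inference_steps=28,
    guidance_scale=3.5,
    height=control_image.height,
    width=control_image.width,
    generator=torch.Generator(device=device).manual_seed(0),
).images[0]
```

Constructing FluxControlNetPipeline from the components of `pipe` instead of calling `from_pretrained` a second time keeps a single copy of the FLUX transformer, VAE, and text encoders in memory, which is why the commit reuses `pipe` throughout.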