openfree committed on
Commit 54a62f5
1 parent: c410887

Update app.py

Files changed (1): app.py (+24 -15)
app.py CHANGED
@@ -20,6 +20,9 @@ from gradio_imageslider import ImageSlider
 import numpy as np
 import warnings
 
+
+huggingface_token = os.getenv("HUGGINFACE_TOKEN")
+
 # Load the translation model
 translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
 
@@ -34,21 +37,16 @@ with open('loras.json', 'r') as f:
 # Initialize the base model
 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
-base_model = "black-forest-labs/FLUX.1-dev"
 
-huggingface_token = os.getenv("HUGGINFACE_TOKEN")
-
-model_path = snapshot_download(
-    repo_id="black-forest-labs/FLUX.1-dev",
-    repo_type="model",
-    ignore_patterns=["*.md", "*..gitattributes"],
-    local_dir="FLUX.1-dev",
-    token=huggingface_token, # type a new token-id.
-)
+# Load the shared FLUX model
+base_model = "black-forest-labs/FLUX.1-dev"
+pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, device_map="auto")
 
+# Components for LoRA inference
 taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
 good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
-pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)
+
+# Set up the image-to-image pipeline
 pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
     base_model,
     vae=good_vae,
@@ -60,14 +58,25 @@ pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
     torch_dtype=dtype
 )
 
-# Load controlnet for upscaling
+# Set up the ControlNet for upscaling
 controlnet = FluxControlNetModel.from_pretrained(
     "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
 ).to(device)
-pipe_upscale = FluxControlNetPipeline.from_pretrained(
-    model_path, controlnet=controlnet, torch_dtype=torch.bfloat16
+
+# Set up the upscale pipeline from the existing pipe's components (FLUX uses a transformer, not a unet)
+pipe_upscale = FluxControlNetPipeline(
+    vae=pipe.vae,
+    text_encoder=pipe.text_encoder,
+    tokenizer=pipe.tokenizer,
+    text_encoder_2=pipe.text_encoder_2,
+    tokenizer_2=pipe.tokenizer_2,
+    transformer=pipe.transformer,
+    scheduler=pipe.scheduler,
+    controlnet=controlnet
 )
-pipe_upscale.to(device)
+
+
+
 
 MAX_SEED = 2**32 - 1
 MAX_PIXEL_BUDGET = 1024 * 1024
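
For reference, a minimal sketch of how the rebuilt pipe_upscale would typically be invoked. The input path, the 2x resize, and the sampler values below are illustrative assumptions (the upscaler's model card suggests similar settings), not part of this commit:

from diffusers.utils import load_image

# Hypothetical low-resolution input; the path is illustrative only
control_image = load_image("input.jpg")
w, h = control_image.size
control_image = control_image.resize((w * 2, h * 2))  # target output resolution

upscaled = pipe_upscale(
    prompt="",  # the upscaler ControlNet is typically run with an empty prompt
    control_image=control_image,
    controlnet_conditioning_scale=0.6,  # assumed value, per the upscaler's model card
    num_inference_steps=28,
    guidance_scale=3.5,
    height=control_image.size[1],
    width=control_image.size[0],
).images[0]
upscaled.save("output.jpg")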