Spaces:
openfree
/
Running on Zero

openfree committed on
Commit
d1ecaf3
•
1 Parent(s): b8987a2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -23
app.py CHANGED
@@ -6,6 +6,7 @@ import gradio as gr
6
  import torch
7
  from einops import rearrange
8
  from PIL import Image
 
9
 
10
  from flux.cli import SamplingOptions
11
  from flux.sampling import denoise, get_noise, get_schedule, prepare, unpack
@@ -13,6 +14,8 @@ from flux.util import load_ae, load_clip, load_flow_model, load_t5
13
  from pulid.pipeline_flux import PuLIDPipeline
14
  from pulid.utils import resize_numpy_image_long
15
 
 
 
16
 
17
  def get_models(name: str, device: torch.device, offload: bool):
18
  t5 = load_t5(device, max_length=128)
@@ -57,6 +60,11 @@ def generate_image(
57
  timestep_to_start_cfg=1,
58
  max_sequence_length=128,
59
  ):
 
 
 
 
 
60
  flux_generator.t5.max_length = max_sequence_length
61
 
62
  seed = int(seed)
@@ -157,31 +165,33 @@ def create_demo(args, model_name: str, device: str = "cuda" if torch.cuda.is_ava
157
  offload: bool = False):
158
 
159
  with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
 
 
160
 
161
  with gr.Row():
162
  with gr.Column():
163
- prompt = gr.Textbox(label="Prompt", value="portrait, color, cinematic")
164
- id_image = gr.Image(label="ID Image")
165
- generate_btn = gr.Button("Generate")
166
 
167
  with gr.Column():
168
- output_image = gr.Image(label="Generated Image")
169
 
170
  with gr.Row():
171
  with gr.Column():
172
- gr.Markdown("## Examples")
173
 
174
  all_examples = [
175
- ['a woman holding sign with glowing green text \"PuLID for FLUX\"', 'example_inputs/liuyifei.png'],
176
- ['portrait, side view', 'example_inputs/liuyifei.png'],
177
- ['white-haired woman with vr technology atmosphere', 'example_inputs/liuyifei.png'],
178
- ['a young child is eating Icecream', 'example_inputs/liuyifei.png'],
179
- ['a man is holding a sign with text \"PuLID for FLUX\", winter, snowing', 'example_inputs/pengwei.jpg'],
180
- ['portrait, candle light', 'example_inputs/pengwei.jpg'],
181
- ['profile shot dark photo of a 25-year-old male with smoke', 'example_inputs/pengwei.jpg'],
182
- ['American Comics, 1boy', 'example_inputs/pengwei.jpg'],
183
- ['portrait, pixar', 'example_inputs/pengwei.jpg'],
184
- ['portrait, made of ice sculpture', 'example_inputs/lecun.jpg'],
185
  ]
186
 
187
  example_images = [example[1] for example in all_examples]
@@ -189,7 +199,7 @@ def create_demo(args, model_name: str, device: str = "cuda" if torch.cuda.is_ava
189
 
190
  gallery = gr.Gallery(
191
  value=list(zip(example_images, example_captions)),
192
- label="Example Gallery",
193
  show_label=False,
194
  elem_id="gallery",
195
  columns=5,
@@ -219,7 +229,7 @@ def create_demo(args, model_name: str, device: str = "cuda" if torch.cuda.is_ava
219
  prompt,
220
  id_image,
221
  gr.Slider(0.0, 3.0, 1, step=0.05, visible=False), # id_weight
222
- gr.Textbox("bad quality, worst quality, text, signature, watermark, extra limbs", visible=False), # neg_prompt
223
  gr.Slider(1.0, 10.0, 1, step=0.1, visible=False), # true_cfg
224
  gr.Slider(0, 20, 1, step=1, visible=False), # timestep_to_start_cfg
225
  gr.Slider(128, 512, 128, step=128, visible=False), # max_sequence_length
@@ -234,13 +244,13 @@ if __name__ == "__main__":
234
 
235
  parser = argparse.ArgumentParser(description="PuLID for FLUX.1-dev")
236
  parser.add_argument("--name", type=str, default="flux-dev", choices=list('flux-dev'),
237
- help="currently only support flux-dev")
238
  parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu",
239
- help="Device to use")
240
- parser.add_argument("--offload", action="store_true", help="Offload model to CPU when not in use")
241
- parser.add_argument("--port", type=int, default=8080, help="Port to use")
242
- parser.add_argument("--dev", action='store_true', help="Development mode")
243
- parser.add_argument("--pretrained_model", type=str, help='for development')
244
  args = parser.parse_args()
245
 
246
  import huggingface_hub
 
6
  import torch
7
  from einops import rearrange
8
  from PIL import Image
9
+ from transformers import pipeline
10
 
11
  from flux.cli import SamplingOptions
12
  from flux.sampling import denoise, get_noise, get_schedule, prepare, unpack
 
14
  from pulid.pipeline_flux import PuLIDPipeline
15
  from pulid.utils import resize_numpy_image_long
16
 
17
+ # 한영 번역 모델 로드
18
+ translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
19
 
20
  def get_models(name: str, device: torch.device, offload: bool):
21
  t5 = load_t5(device, max_length=128)
 
60
  timestep_to_start_cfg=1,
61
  max_sequence_length=128,
62
  ):
63
+ # 한글 프롬프트를 영어로 번역
64
+ if any('\u3131' <= c <= '\u318E' or '\uAC00' <= c <= '\uD7A3' for c in prompt):
65
+ translated = translator(prompt)[0]['translation_text']
66
+ prompt = translated
67
+
68
  flux_generator.t5.max_length = max_sequence_length
69
 
70
  seed = int(seed)
 
165
  offload: bool = False):
166
 
167
  with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
168
+ gr.Markdown("# 제목")
169
+ gr.Markdown("## 설명")
170
 
171
  with gr.Row():
172
  with gr.Column():
173
+ prompt = gr.Textbox(label="프롬프트", value="초상화, 색감, 영화적")
174
+ id_image = gr.Image(label="ID ์ด๋ฏธ์ง€", source="webcam", type="numpy")
175
+ generate_btn = gr.Button("์ƒ์„ฑ")
176
 
177
  with gr.Column():
178
+ output_image = gr.Image(label="์ƒ์„ฑ๋œ ์ด๋ฏธ์ง€")
179
 
180
  with gr.Row():
181
  with gr.Column():
182
+ gr.Markdown("## 예시")
183
 
184
  all_examples = [
185
+ ['์—ฌ์ž๊ฐ€ \"PuLID for FLUX\"๋ผ๊ณ  ์“ฐ์ธ ๋น›๋‚˜๋Š” ๋…น์ƒ‰ ํ‘œ์ง€ํŒ์„ ๋“ค๊ณ  ์žˆ๋‹ค', 'example_inputs/liuyifei.png'],
186
+ ['옆모습 초상화', 'example_inputs/liuyifei.png'],
187
+ ['VR 기술 분위기의 흰 머리 여성', 'example_inputs/liuyifei.png'],
188
+ ['어린 아이가 아이스크림을 먹고 있다', 'example_inputs/liuyifei.png'],
189
+ ['๋‚จ์ž๊ฐ€ \"PuLID for FLUX\"๋ผ๊ณ  ์“ฐ์ธ ํ‘œ์ง€ํŒ์„ ๋“ค๊ณ  ์žˆ๋‹ค, ๊ฒจ์šธ, ๋ˆˆ ๋‚ด๋ฆผ, ์‚ฐ ์ •์ƒ', 'example_inputs/pengwei.jpg'],
190
+ ['์ดˆ์ƒํ™”, ์ด›๋ถˆ ์กฐ๋ช…', 'example_inputs/pengwei.jpg'],
191
+ ['25세 남성의 어두운 프로필 사진, 입에서 연기가 나오고 있음', 'example_inputs/pengwei.jpg'],
192
+ ['미국 만화 스타일, 소년 1명', 'example_inputs/pengwei.jpg'],
193
+ ['์ดˆ์ƒํ™”, ํ”ฝ์‚ฌ ์Šคํƒ€์ผ', 'example_inputs/pengwei.jpg'],
194
+ ['์ดˆ์ƒํ™”, ์–ผ์Œ ์กฐ๊ฐ์ƒ', 'example_inputs/lecun.jpg'],
195
  ]
196
 
197
  example_images = [example[1] for example in all_examples]
 
199
 
200
  gallery = gr.Gallery(
201
  value=list(zip(example_images, example_captions)),
202
+ label="예시 갤러리",
203
  show_label=False,
204
  elem_id="gallery",
205
  columns=5,
 
229
  prompt,
230
  id_image,
231
  gr.Slider(0.0, 3.0, 1, step=0.05, visible=False), # id_weight
232
+ gr.Textbox("저품질, 최악의 품질, 텍스트, 서명, 워터마크, 여분의 팔다리", visible=False), # neg_prompt
233
  gr.Slider(1.0, 10.0, 1, step=0.1, visible=False), # true_cfg
234
  gr.Slider(0, 20, 1, step=1, visible=False), # timestep_to_start_cfg
235
  gr.Slider(128, 512, 128, step=128, visible=False), # max_sequence_length
 
244
 
245
  parser = argparse.ArgumentParser(description="PuLID for FLUX.1-dev")
246
  parser.add_argument("--name", type=str, default="flux-dev", choices=list('flux-dev'),
247
+ help="현재는 flux-dev만 지원합니다")
248
  parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu",
249
+ help="사용할 디바이스")
250
+ parser.add_argument("--offload", action="store_true", help="사용하지 않을 때 모델을 CPU로 옮깁니다")
251
+ parser.add_argument("--port", type=int, default=8080, help="사용할 포트")
252
+ parser.add_argument("--dev", action='store_true', help="개발 모드")
253
+ parser.add_argument("--pretrained_model", type=str, help='개발용')
254
  args = parser.parse_args()
255
 
256
  import huggingface_hub