salomonsky committed
Commit 0198afd
1 Parent(s): 3e8cb73

Update app.py

Files changed (1)
  1. app.py +29 -30
app.py CHANGED
@@ -23,18 +23,15 @@ def enable_lora(lora_add, basemodel):
     return basemodel if not lora_add else lora_add
 
 async def generate_image(prompt, model, lora_word, width, height, scales, steps, seed):
-    try:
-        if seed == -1:
-            seed = random.randint(0, MAX_SEED)
-        seed = int(seed)
-        text = str(translator.translate(prompt, 'English')) + "," + lora_word
-        client = AsyncInferenceClient()
-        image = await client.text_to_image(prompt=text, height=height, width=width, guidance_scale=scales, num_inference_steps=steps, model=model)
-        return image, seed
-    except Exception as e:
-        raise gr.Error(f"Error en {e}")
+    if seed == -1:
+        seed = random.randint(0, MAX_SEED)
+    seed = int(seed)
+    text = str(translator.translate(prompt, 'English')) + "," + lora_word
+    client = AsyncInferenceClient()
+    image = await client.text_to_image(prompt=text, height=height, width=width, guidance_scale=scales, num_inference_steps=steps, model=model)
+    return image, seed
 
-async def gen(prompt, basemodel, lora_add, lora_word, width, height, scales, steps, seed, upscale_factor, process_upscale):
+async def gen(prompt, basemodel, lora_add, lora_word, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
     model = enable_lora(lora_add, basemodel)
     image, seed = await generate_image(prompt, model, lora_word, width, height, scales, steps, seed)
     image_path = "temp_image.png"
@@ -71,24 +68,26 @@ with gr.Blocks(css=CSS, js=JS, theme="Nymbo/Nymbo_Theme") as demo:
         basemodel_choice = gr.Dropdown(label="Base Model", choices=["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-DEV"], value="black-forest-labs/FLUX.1-schnell")
         lora_add = gr.Textbox(label="Add Flux LoRA", info="Modelo Lora", lines=1, value="XLabs-AI/flux-RealismLora")
         lora_word = gr.Textbox(label="Add Flux LoRA Trigger Word", info="Add the Trigger Word", lines=1, value="")
-        width = gr.Slider(label="Width", minimum=512, maximum=1280, step=8, value=512)
-        height = gr.Slider(label="Height", minimum=512, maximum=1280, step=8, value=512)
-        scales = gr.Slider(label="Guidance", minimum=3.5, maximum=7, step=0.1, value=3.5)
-        steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=24)
-        seed = gr.Slider(label="Seeds", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
+        lora_model_choice = gr.Dropdown(label="LORA Model", choices=["Shakker-Labs/FLUX.1-dev-LoRA-add-details", "Otro modelo LORA"])
+        process_lora = gr.Checkbox(label="Process LORA", value=True)
         upscale_factor = gr.Radio(label="UpScale Factor", choices=[2, 4, 8], value=2, scale=2)
         process_upscale = gr.Checkbox(label="Process Upscale", value=False)
-        submit_btn = gr.Button("Submit", scale=1)
-
-        submit_btn.click(
-            fn=lambda: None,
-            inputs=None,
-            outputs=[output_res],
-            queue=False
-        ).then(
-            fn=gen,
-            inputs=[prompt, basemodel_choice, lora_add, lora_word, width, height, scales, steps, seed, upscale_factor, process_upscale],
-            outputs=[output_res]
-        )
-
-demo.launch()
+
+        with gr.Accordion(label="Advanced Options", open=False):
+            width = gr.Slider(label="Width", minimum=512, maximum=1280, step=8, value=512)
+            height = gr.Slider(label="Height", minimum=512, maximum=1280, step=8, value=512)
+            scales = gr.Slider(label="Guidance", minimum=3.5, maximum=7, step=0.1, value=3.5)
+            steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=24)
+            seed = gr.Slider(label="Seeds", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
+        submit_btn = gr.Button("Submit", scale=1)
+        submit_btn.click(
+            fn=lambda: None,
+            inputs=None,
+            outputs=[output_res],
+            queue=False
+        ).then(
+            fn=gen,
+            inputs=[prompt, basemodel_choice, lora_add, lora_word, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora],
+            outputs=[output_res]
+        )
+demo.launch()
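For reference, the updated generate_image relies on huggingface_hub's AsyncInferenceClient. Below is a minimal, self-contained sketch of that call pattern; the function name text_to_image_demo, the MAX_SEED value, and the hard-coded size, guidance, and step values are illustrative placeholders, and the translation and LoRA trigger-word handling from app.py are omitted.

import asyncio
import random

from huggingface_hub import AsyncInferenceClient

MAX_SEED = 2**32 - 1  # placeholder; app.py defines its own MAX_SEED

async def text_to_image_demo(prompt, model, seed=-1):
    # Same seed convention as generate_image: -1 means "pick a random seed".
    if seed == -1:
        seed = random.randint(0, MAX_SEED)
    seed = int(seed)
    client = AsyncInferenceClient()
    # Returns a PIL image produced by the hosted inference endpoint.
    image = await client.text_to_image(
        prompt=prompt,
        width=512,
        height=512,
        guidance_scale=3.5,
        num_inference_steps=24,
        model=model,
    )
    return image, seed

# Example usage (needs a Hugging Face token with Inference API access):
# image, seed = asyncio.run(text_to_image_demo("a red bicycle", "black-forest-labs/FLUX.1-schnell"))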
 
 
 
 
 
 
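The event wiring in the second hunk first clears the output (fn=lambda: None with queue=False) and then chains the actual generation through .then(). Below is a stripped-down sketch of the same pattern, with a placeholder slow_fn standing in for the app's async gen and plain Textbox components in place of the image output.

import time
import gradio as gr

def slow_fn(text):
    # Placeholder for the app's gen(): anything slow enough that clearing
    # the stale output first gives useful feedback to the user.
    time.sleep(2)
    return f"processed: {text}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    output_res = gr.Textbox(label="Result")
    submit_btn = gr.Button("Submit")

    # Step 1: reset the previous result immediately, bypassing the queue.
    # Step 2: .then(...) runs the heavy function once the reset is applied.
    submit_btn.click(
        fn=lambda: None,
        inputs=None,
        outputs=[output_res],
        queue=False,
    ).then(
        fn=slow_fn,
        inputs=[prompt],
        outputs=[output_res],
    )

if __name__ == "__main__":
    demo.launch()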