salomonsky committed
Commit d4fba6d
1 Parent(s): 6af9203

Update app.py

Files changed (1)
  1. app.py +92 -65
app.py CHANGED
@@ -1,20 +1,16 @@
- import gradio as gr
  import os
+ import gradio as gr
- from gradio_client import Client, handle_file
- from huggingface_hub import login
- from PIL import Image
  import numpy as np
  import random
+ from huggingface_hub import AsyncInferenceClient
  from translatepy import Translator
  import requests
  import re
  import asyncio
- from gradio_imageslider import ImageSlider
-
- hf_tkn = os.environ.get("HF_TKN")
- login(hf_tkn)
+ from PIL import Image

  translator = Translator()
+ HF_TOKEN = os.environ.get("HF_TOKEN", None)
  basemodel = "black-forest-labs/FLUX.1-dev"
  MAX_SEED = np.iinfo(np.int32).max

@@ -37,44 +33,6 @@ def enable_lora(lora_add):
      else:
          return lora_add

- def handle_file(img_path):
-     return Image.open(img_path)
-
- def get_upscale_finegrain(prompt, img_path, upscale_factor):
-     if upscale_factor == 0:
-         return handle_file(img_path)
-     client = Client("finegrain/finegrain-image-enhancer")
-     result = client.predict(
-         input_image=handle_file(img_path),
-         prompt=prompt,
-         negative_prompt="",
-         seed=42,
-         upscale_factor=upscale_factor,
-         controlnet_scale=0.6,
-         controlnet_decay=1,
-         condition_scale=6,
-         tile_width=112,
-         tile_height=144,
-         denoise_strength=0.35,
-         num_inference_steps=18,
-         solver="DDIM",
-         api_name="/process"
-     )
-     print(result)
-     return result[1]
-
- async def upscale_image(image, upscale_factor):
-     try:
-         result = get_upscale_finegrain(
-             prompt="",
-             img_path=image,
-             upscale_factor=upscale_factor
-         )
-     except Exception as e:
-         raise gr.Error(f"Error in {e}")
-
-     return result
-
  async def generate_image(
      prompt:str,
      model:str,
@@ -83,8 +41,8 @@ async def generate_image(
      height:int=1024,
      scales:float=3.5,
      steps:int=24,
-     seed:int=-1
- ):
+     seed:int=-1):
+
      if seed == -1:
          seed = random.randint(0, MAX_SEED)
      seed = int(seed)
@@ -92,8 +50,16 @@ async def generate_image(

      text = str(translator.translate(prompt, 'English')) + "," + lora_word

+     client = AsyncInferenceClient()
      try:
-         image = gr.Image(type="pil", image=gr.processing_utils.encode_pil_image(text_to_image(text, height=height, width=width, guidance_scale=scales, num_inference_steps=steps, model=model)))
+         image = await client.text_to_image(
+             prompt=text,
+             height=height,
+             width=width,
+             guidance_scale=scales,
+             num_inference_steps=steps,
+             model=model,
+         )
      except Exception as e:
          raise gr.Error(f"Error in {e}")

@@ -101,37 +67,98 @@ async def generate_image(

  async def gen(
      prompt:str,
-     lora_add:str="XLabs-AI/flux-RealismLora",
+     lora_add:str="",
      lora_word:str="",
      width:int=768,
      height:int=1024,
      scales:float=3.5,
      steps:int=24,
      seed:int=-1,
-     upscale_factor:int=0
+     progress=gr.Progress(track_tqdm=True)
  ):
      model = enable_lora(lora_add)
+     print(model)
      image, seed = await generate_image(prompt,model,lora_word,width,height,scales,steps,seed)
-
-     upscaled_image = await upscale_image(image, upscale_factor)
-     return upscaled_image, seed
+     return image, seed

  with gr.Blocks(css=CSS, js=JS, theme="Nymbo/Nymbo_Theme") as demo:
      gr.HTML("<h1><center>Flux Lab Light</center></h1>")
+     gr.HTML("<p><center>Powered By HF Inference API</center></p>")
      with gr.Row():
          with gr.Column(scale=4):
              with gr.Row():
-                 img = gr.Image(type="filepath", label='Imagen generada por Flux', height=600)
+                 img = gr.Image(type="filepath", label='flux Generated Image', height=600)
              with gr.Row():
-                 prompt = gr.Textbox(label='Ingresa tu prompt (Multi-Idiomas)', placeholder="Ingresa prompt...", scale=6)
+                 prompt = gr.Textbox(label='Enter Your Prompt (Multi-Languages)', placeholder="Enter prompt...", scale=6)
                  sendBtn = gr.Button(scale=1, variant='primary')
-             with gr.Accordion("Opciones avanzadas", open=True):
+             with gr.Accordion("Advanced Options", open=True):
                  with gr.Column(scale=1):
-                     width = gr.Slider(label="Ancho", minimum=512, maximum=1280, step=8, value=768)
-                     height = gr.Slider(label="Alto", minimum=512, maximum=1280, step=8, value=1024)
-                     scales = gr.Slider(label="Guía", minimum=3.5, maximum=7, step=0.1, value=3.5)
-                     steps = gr.Slider(label="Pasos", minimum=1, maximum=50, step=1)
-                     upscale_factor = gr.Slider(label="Factor de escala", minimum=0, maximum=4, step=1, value=0)
-                     seed = gr.Number(label="Semilla", value=-1)
-     sendBtn.click(gen, inputs=[prompt, lora_add, lora_word, width, height, scales, steps, seed, upscale_factor], outputs=[img])
- demo.launch()
+                     width = gr.Slider(
+                         label="Width",
+                         minimum=512,
+                         maximum=1280,
+                         step=8,
+                         value=768,
+                     )
+                     height = gr.Slider(
+                         label="Height",
+                         minimum=512,
+                         maximum=1280,
+                         step=8,
+                         value=1024,
+                     )
+                     scales = gr.Slider(
+                         label="Guidance",
+                         minimum=3.5,
+                         maximum=7,
+                         step=0.1,
+                         value=3.5,
+                     )
+                     steps = gr.Slider(
+                         label="Steps",
+                         minimum=1,
+                         maximum=100,
+                         step=1,
+                         value=24,
+                     )
+                     seed = gr.Slider(
+                         label="Seeds",
+                         minimum=-1,
+                         maximum=MAX_SEED,
+                         step=1,
+                         value=-1,
+                     )
+                     lora_add = gr.Textbox(
+                         label="Add Flux LoRA",
+                         info="Copy the HF LoRA model name here",
+                         lines=1,
+                         placeholder="Please use Warm status model",
+                     )
+                     lora_word = gr.Textbox(
+                         label="Add Flux LoRA Trigger Word",
+                         info="Add the Trigger Word",
+                         lines=1,
+                         value="",
+                     )
+
+     gr.on(
+         triggers=[
+             prompt.submit,
+             sendBtn.click,
+         ],
+         fn=gen,
+         inputs=[
+             prompt,
+             lora_add,
+             lora_word,
+             width,
+             height,
+             scales,
+             steps,
+             seed
+         ],
+         outputs=[img, seed]
+     )
+
+ if __name__ == "__main__":
+     demo.queue(api_open=False).launch(show_api=False, share=False)
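
The substantive change in this commit is that generation now calls the Hugging Face Inference API through huggingface_hub's AsyncInferenceClient instead of the removed gradio_client/Finegrain upscaling path. Below is a minimal standalone sketch of that call outside the Gradio app; the prompt, the explicit token= argument, and the output filename are illustrative assumptions and not taken from app.py.

# Standalone sketch of the text_to_image call this commit adopts.
# The prompt, token handling, and output path are illustrative, not from app.py.
import asyncio
import os

from huggingface_hub import AsyncInferenceClient


async def main():
    # app.py reads HF_TOKEN from the environment; passing it explicitly here for clarity.
    client = AsyncInferenceClient(token=os.environ.get("HF_TOKEN"))
    image = await client.text_to_image(
        prompt="a scenic mountain lake at sunrise",  # illustrative prompt
        width=768,
        height=1024,
        guidance_scale=3.5,
        num_inference_steps=24,
        model="black-forest-labs/FLUX.1-dev",
    )
    # text_to_image returns a PIL.Image.Image
    image.save("flux_output.png")


if __name__ == "__main__":
    asyncio.run(main())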