salomonsky committed on
Commit 90986f4
1 Parent(s): e6e9673

Update app.py

Files changed (1)
  1. app.py +198 -111
app.py CHANGED
@@ -1,137 +1,224 @@
-import os, zipfile, yaml, numpy as np, random, asyncio
 from pathlib import Path
 from PIL import Image
 import streamlit as st
-from huggingface_hub import AsyncInferenceClient
-from moviepy.editor import ImageSequenceClip
-
-try:
-    credentials = yaml.safe_load(open("config.yaml"))
-except:
-    st.error("Error al cargar el archivo de configuración.")
-    credentials = {"username": "", "password": ""}
-
-MAX_SEED = np.iinfo(np.int32).max
 client = AsyncInferenceClient()
 DATA_PATH = Path("./data")
-PREDEFINED_SEED = random.randint(0, MAX_SEED)
 DATA_PATH.mkdir(exist_ok=True)

-async def generate_image(prompt, width, height, seed, model_name):
-    try:
-        return await client.text_to_image(prompt=prompt, height=height, width=width, model=model_name), int(seed)
-    except Exception as e:
         return f"Error al generar imagen: {e}", None

-def save_prompt(prompt_text, seed):
-    try:
         prompt_file_path = DATA_PATH / f"prompt_{seed}.txt"
-        open(prompt_file_path, "w").write(prompt_text)
         return prompt_file_path
-    except Exception as e:
         st.error(f"Error al guardar el prompt: {e}")

-async def gen(prompt, width, height, model_name):
-    seed, progress_bar = PREDEFINED_SEED, st.progress(0)
-    image, seed = await generate_image(prompt, width, height, seed, model_name)
-    progress_bar.progress(100)
-    if isinstance(image, str) and image.startswith("Error"):
-        return [image, None]
-    return [str(save_image(image, seed)), str(save_prompt(prompt, seed))]
-
-def save_image(image, seed):
-    try:
         image_path = DATA_PATH / f"image_{seed}.jpg"
         image.save(image_path, format="JPEG")
         return image_path
-    except Exception as e:
         st.error(f"Error al guardar la imagen: {e}")
-
-def get_storage():
-    files = sorted([file for file in DATA_PATH.glob("*.jpg")], key=lambda x: x.stat().st_mtime, reverse=True)
-    return [str(file.resolve()) for file in files], f"Uso total: {sum([file.stat().st_size for file in files])/(1024.0 ** 3):.3f}GB"
-
-def get_prompts():
-    return {file.stem.replace("prompt_", ""): file for file in DATA_PATH.glob("*.txt")}
-
-def delete_all_images():
-    try:
-        [os.remove(file) for file in DATA_PATH.glob("*.jpg") + DATA_PATH.glob("*.txt")]
-        st.success("Todas las imágenes y prompts han sido borrados.")
-    except Exception as e:
-        st.error(f"Error al borrar archivos: {e}")
-
-def download_images_as_zip():
-    zip_path = DATA_PATH / "images.zip"
-    zipf = zipfile.ZipFile(zip_path, 'w')
-    [zipf.write(file, arcname=file.name) for file in DATA_PATH.glob("*.jpg")]
-    with open(zip_path, "rb") as zip_file:
-        st.download_button(label="Descargar imágenes en .zip", data=zip_file, file_name="images.zip", mime="application/zip")
-
-def create_video_from_images():
-    try:
-        image_files = sorted(DATA_PATH.glob("*.jpg"))
-        image_sequence = [Image.open(image_file) for image_file in image_files]
-    except:
-        st.error("No hay imágenes disponibles para crear un video.")
-        return
-    video_path = DATA_PATH / "output_video.mp4"
-    ImageSequenceClip([np.array(img) for img in image_sequence], fps=1).write_videofile(str(video_path), codec="libx264")
-    return video_path

 def main():
     st.set_page_config(layout="wide")
-    st.title("Generador de Imágenes Flux")

-    if "authenticated" not in st.session_state:
-        st.session_state.authenticated = False
-
-    if not st.session_state.authenticated:
-        username, password = st.text_input("Usuario"), st.text_input("Contraseña", type="password")
-        if st.button("Ingresar") and username == credentials["username"] and password == credentials["password"]:
-            st.session_state.authenticated = True
-            st.success("Inicio de sesión exitoso.")
-        elif st.button("Ingresar"):
-            st.error("Usuario o contraseña incorrectos.")
         return
-
-    prompt = st.sidebar.text_input("Descripción de la imagen", max_chars=500)
     format_option = st.sidebar.selectbox("Formato", ["9:16", "16:9"])
-    model_option = st.sidebar.selectbox("Modelo", ["enhanceaiteam/Flux-Uncensored-V2", "enhanceaiteam/Flux-uncensored"])
-    width, height = (720, 1280) if format_option == "9:16" else (1280, 720)

     if st.sidebar.button("Generar Imagen"):
-        with st.spinner("Generando imagen..."):
-            result = asyncio.run(gen(prompt, width, height, model_option))
-            image_paths, prompt_file = result[0], result[1]
-            if Path(image_paths).exists():
-                st.image(image_paths, caption="Imagen Generada")
-            if prompt_file and Path(prompt_file).exists():
-                st.write(f"Prompt utilizado: {Path(prompt_file).read_text()}")
-
-    files, usage = get_storage()
-    st.text(usage)
-    cols, prompts = st.columns(6), get_prompts()
-
-    for idx, file in enumerate(files):
-        with cols[idx % 6]:
-            image = Image.open(file)
-            prompt_file = prompts.get(Path(file).stem.replace("image_", ""), None)
-            st.image(image, caption=f"Imagen {idx+1}")
-            st.write(f"Prompt: {Path(prompt_file).read_text() if prompt_file else 'No disponible'}")
-            if st.button(f"Borrar Imagen {idx+1}", key=f"delete_{idx}"):
-                os.remove(file)
-                if prompt_file: os.remove(prompt_file)
-                st.success(f"Imagen {idx+1} y su prompt fueron borrados.")
-
-    if st.sidebar.button("Borrar todas las imágenes"):
-        delete_all_images()
-    if st.sidebar.button("Descargar imágenes en .zip"):
-        download_images_as_zip()
-    if st.button("Generar video con las imágenes"):
-        video_path = create_video_from_images()
-        if video_path:
-            st.video(str(video_path), format="video/mp4")
-
-if __name__ == "__main__":
     main()
 
+import os
+import numpy as np
+import random
 from pathlib import Path
 from PIL import Image
+from insightface.app import FaceAnalysis
 import streamlit as st
+from huggingface_hub import InferenceClient, AsyncInferenceClient
+from gradio_client import Client, handle_file
+import asyncio
+import insightface
+from concurrent.futures import ThreadPoolExecutor
+import yaml
+
+try:
+    with open("config.yaml", "r") as file:
+        credentials = yaml.safe_load(file)
+except Exception as e:
+    st.error(f"Error al cargar el archivo de configuración: {e}")
+    credentials = {"username": "", "password": ""}
+
+MAX_SEED = np.iinfo(np.int32).max
+HF_TOKEN_UPSCALER = os.environ.get("HF_TOKEN_UPSCALER")
 client = AsyncInferenceClient()
+llm_client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 DATA_PATH = Path("./data")
 DATA_PATH.mkdir(exist_ok=True)

+def prepare_face_app():
+    app = FaceAnalysis(name='buffalo_l')
+    app.prepare(ctx_id=0, det_size=(640, 640))
+    swapper = insightface.model_zoo.get_model('onix.onnx')
+    return app, swapper
+
+app, swapper = prepare_face_app()
+
+def run_async(func):
+    loop = asyncio.new_event_loop()
+    asyncio.set_event_loop(loop)
+    executor = ThreadPoolExecutor(max_workers=1)
+    result = loop.run_in_executor(executor, func)
+    return loop.run_until_complete(result)
+
+async def generate_image(combined_prompt, model, width, height, scales, steps, seed):
+    try:
+        if seed == -1:
+            seed = random.randint(0, MAX_SEED)
+        seed = int(seed)
+        image = await client.text_to_image(
+            prompt=combined_prompt, height=height, width=width, guidance_scale=scales,
+            num_inference_steps=steps, model=model
+        )
+        return image, seed
+    except Exception as e:
         return f"Error al generar imagen: {e}", None

+def get_upscale_finegrain(prompt, img_path, upscale_factor):
+    try:
+        client = Client("finegrain/finegrain-image-enhancer", hf_token=HF_TOKEN_UPSCALER)
+        result = client.predict(
+            input_image=handle_file(img_path), prompt=prompt, upscale_factor=upscale_factor
+        )
+        return result[1] if isinstance(result, list) and len(result) > 1 else None
+    except Exception as e:
+        return None
+
+def save_prompt(prompt_text, seed):
+    try:
         prompt_file_path = DATA_PATH / f"prompt_{seed}.txt"
+        with open(prompt_file_path, "w") as prompt_file:
+            prompt_file.write(prompt_text)
         return prompt_file_path
+    except Exception as e:
         st.error(f"Error al guardar el prompt: {e}")
+        return None

+async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, process_enhancer, language):
+    combined_prompt = prompt
+    if process_enhancer:
+        improved_prompt = await improve_prompt(prompt, language)
+        combined_prompt = f"{prompt} {improved_prompt}"
+
+    if seed == -1:
+        seed = random.randint(0, MAX_SEED)
+    seed = int(seed)
+    progress_bar = st.progress(0)
+    image, seed = await generate_image(combined_prompt, basemodel, width, height, scales, steps, seed)
+    progress_bar.progress(50)
+
+    if isinstance(image, str) and image.startswith("Error"):
+        progress_bar.empty()
+        return [image, None, combined_prompt]
+
+    image_path = save_image(image, seed)
+    prompt_file_path = save_prompt(combined_prompt, seed)
+
+    if process_upscale:
+        upscale_image_path = get_upscale_finegrain(combined_prompt, image_path, upscale_factor)
+        if upscale_image_path:
+            upscale_image = Image.open(upscale_image_path)
+            upscale_image.save(DATA_PATH / f"upscale_image_{seed}.jpg", format="JPEG")
+            progress_bar.progress(100)
+            image_path.unlink()
+            return [str(DATA_PATH / f"upscale_image_{seed}.jpg"), str(prompt_file_path)]
+        else:
+            progress_bar.empty()
+            return [str(image_path), str(prompt_file_path)]
+    else:
+        progress_bar.progress(100)
+        return [str(image_path), str(prompt_file_path)]
+
+async def improve_prompt(prompt, language):
+    try:
+        instruction_en = "With this idea, describe in English a detailed txt2img prompt in 500 characters at most, add illumination, atmosphere, cinematic elements, and characters if need it..."
+        instruction_es = "Con esta idea, describe en español un prompt detallado de txt2img en un máximo de 500 caracteres, con iluminación, atmósfera, elementos cinematográficos y en su caso personajes..."
+        instruction = instruction_en if language == "en" else instruction_es
+        formatted_prompt = f"{prompt}: {instruction}"
+        response = llm_client.text_generation(formatted_prompt, max_new_tokens=500)
+        improved_text = response['generated_text'].strip() if 'generated_text' in response else response.strip()
+        return improved_text[:500] if len(improved_text) > 500 else improved_text
+    except Exception as e:
+        return f"Error mejorando el prompt: {e}"
+
+def save_image(image, seed):
+    try:
         image_path = DATA_PATH / f"image_{seed}.jpg"
         image.save(image_path, format="JPEG")
         return image_path
+    except Exception as e:
         st.error(f"Error al guardar la imagen: {e}")
+        return None
+
+def get_storage():
+    files = [file for file in DATA_PATH.glob("*.jpg") if file.is_file()]
+    files.sort(key=lambda x: x.stat().st_mtime, reverse=True)
+    usage = sum([file.stat().st_size for file in files])
+    return [str(file.resolve()) for file in files], f"Uso total: {usage/(1024.0 ** 3):.3f}GB"
+
+def get_prompts():
+    prompt_files = [file for file in DATA_PATH.glob("*.txt") if file.is_file()]
+    return {file.stem.replace("prompt_", ""): file for file in prompt_files}
+
+def delete_image(image_path):
+    try:
+        if Path(image_path).exists():
+            Path(image_path).unlink()
+            st.success(f"Imagen {image_path} borrada.")
+        else:
+            st.error("El archivo de imagen no existe.")
+    except Exception as e:
+        st.error(f"Error al borrar la imagen: {e}")
+
+def authenticate_user(username, password, credentials):
+    return username == credentials["username"] and password == credentials["password"]
+
+def login_form(credentials):
+    st.title("Iniciar Sesión")
+    username, password = st.text_input("Usuario"), st.text_input("Contraseña", type="password")
+    if st.button("Iniciar Sesión") and authenticate_user(username, password, credentials):
+        st.session_state['authenticated'] = True
+
+def sort_faces(faces):
+    return sorted(faces, key=lambda x: x.bbox[0])
+
+def get_face(faces, face_id):
+    if not faces: raise ValueError("No se encontraron rostros.")
+    if len(faces) < face_id or face_id < 1:
+        raise ValueError(f"Solo hay {len(faces)} rostros, pediste el {face_id}.")
+    return faces[face_id - 1]
+
+def swap_faces(source_image, source_face_index, destination_image):
+    faces = sort_faces(app.get(source_image))
+    source_face = get_face(faces, source_face_index)
+    res_faces = sort_faces(app.get(destination_image))
+    res_face = get_face(res_faces, 1)
+    result = swapper.get(destination_image, res_face, source_face, paste_back=True)
+    return result

 def main():
     st.set_page_config(layout="wide")
+    login_form(credentials)

+    if 'authenticated' not in st.session_state or not st.session_state['authenticated']:
+        st.warning("Por favor, inicia sesión para acceder a la aplicación.")
         return
+
+    prompt = st.sidebar.text_input("Descripción de la imagen", max_chars=900)
+    process_enhancer = st.sidebar.checkbox("Mejorar Prompt", value=False)
+    language = st.sidebar.selectbox("Idioma", ["en", "es"])
+    basemodel = st.sidebar.selectbox("Modelo Base", ["black-forest-labs/FLUX.1-DEV", "black-forest-labs/FLUX.1-schnell"])
     format_option = st.sidebar.selectbox("Formato", ["9:16", "16:9"])
+    process_upscale = st.sidebar.checkbox("Procesar Escalador", value=False)
+    upscale_factor = st.sidebar.selectbox("Factor de Escala", [2, 4, 8], index=0)
+    scales = st.sidebar.slider("Escalado", 1, 20, 10)
+    steps = st.sidebar.slider("Pasos", 1, 100, 20)
+    seed = st.sidebar.number_input("Semilla", value=-1)
+    width, height = (1080, 1920) if format_option == "9:16" else (1920, 1080)

     if st.sidebar.button("Generar Imagen"):
+        with st.spinner("Generando..."):
+            # Llamada a la función asincrónica desde un evento
+            image_path, prompt_file_path = asyncio.run(gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, process_enhancer, language))
+            if image_path:
+                st.image(image_path, caption="Imagen Generada", use_column_width=True)
+                st.download_button("Descargar Imagen", image_path)
+
+    if st.sidebar.button("Ver Almacenamiento"):
+        files, usage = get_storage()
+        st.write(usage)
+        for file in files:
+            st.write(file)
+
+    if st.sidebar.button("Ver Prompts"):
+        prompts = get_prompts()
+        for key, path in prompts.items():
+            st.write(f"{key}: {path}")
+
+    if st.sidebar.button("Borrar Imagen"):
+        image_to_delete = st.sidebar.text_input("Ruta de la imagen a borrar")
+        if image_to_delete:
+            delete_image(image_to_delete)
+
+if __name__ == "__main__":
     main()
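
The diff adds insightface-based face-swap helpers (sort_faces, get_face, swap_faces), but main() does not call them yet. Below is a minimal usage sketch, not part of the commit, of how swap_faces could be driven from the same module; it assumes OpenCV (cv2) is available for image I/O, and the swap_first_face name and file paths are hypothetical.

import cv2

def swap_first_face(source_path, destination_path, output_path):
    # FaceAnalysis.get() works on BGR numpy arrays, which cv2.imread returns.
    source_image = cv2.imread(source_path)
    destination_image = cv2.imread(destination_path)
    if source_image is None or destination_image is None:
        raise FileNotFoundError("Could not read one of the input images.")
    # Take face #1 (the leftmost one, per sort_faces) from the source image and
    # paste it onto the first face detected in the destination image.
    result = swap_faces(source_image, 1, destination_image)
    cv2.imwrite(output_path, result)
    return output_path

swap_faces already performs detection, sorting, and the paste-back step through the shared app and swapper globals, so a wrapper like this only needs to handle loading and saving the images.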