Update app.py
app.py CHANGED
@@ -1,22 +1,20 @@
 from pathlib import Path
 from PIL import Image
 import streamlit as st
-import
-from
-from huggingface_hub import InferenceClient
-import os
-import random
-import numpy as np
-import yaml
-import time
-import logging
-from dataclasses import dataclass, field
+import os, random, numpy as np, yaml, time
+from dataclasses import dataclass
 from typing import List
+from huggingface_hub import InferenceClient

 st.set_page_config(layout="wide")
+HF_TOKEN = os.getenv("HF_TOKEN")
+
+if not HF_TOKEN:
+    st.error("Error en el token! 'HF_TOKEN'.")
+    st.stop()

 try:
-    with open("config.yaml", "r") as file:
+    with open("config.yaml", "r") as file:
         credentials = yaml.safe_load(file)
 except Exception as e:
     st.error(f"Error al cargar el archivo de configuración: {e}")
@@ -28,149 +26,93 @@ class AppConfig:
     CLEANUP_DAYS: int = 7

 MAX_SEED = AppConfig.MAX_SEED
-client = InferenceClient()
 DATA_PATH = Path("./data")
 DATA_PATH.mkdir(exist_ok=True)
-HF_TOKEN_UPSCALER = os.environ.get("HF_TOKEN")

-
-
-    app = FaceAnalysis(name='buffalo_l')
-    app.prepare(ctx_id=0, det_size=(640, 640))
-    swapper = insightface.model_zoo.get_model('onix.onnx')
-    return app, swapper
+def get_inference_client():
+    return InferenceClient(token=HF_TOKEN)

-
-    try:
-        upscale_client = InferenceClient("fal/AuraSR-v2", hf_token=HF_TOKEN_UPSCALER)
-        result = upscale_client.predict(input_image=open(img_path, "rb").read(), prompt=prompt, upscale_factor=upscale_factor)
-        return result[1] if isinstance(result, list) and len(result) > 1 else None
-    except Exception as e:
-        st.error(f"Error al mejorar la imagen: {e}")
-        return None
-
-def cleanup_old_images(max_age_days=AppConfig.CLEANUP_DAYS):
-    current_time = time.time()
-    for image_file in DATA_PATH.glob("*.jpg"):
-        if current_time - image_file.stat().st_mtime > max_age_days * 86400:
-            os.remove(image_file)
+client = get_inference_client()

-def authenticate_user(username, password):
+def authenticate_user(username, password):
     return username == credentials["username"] and password == credentials["password"]

-def list_saved_images():
+def list_saved_images():
     return sorted(DATA_PATH.glob("*.jpg"), key=lambda x: x.stat().st_mtime, reverse=True)

-def
-
-
-def get_face(faces, face_id):
-    if not faces or len(faces) < face_id:
-        raise ValueError("Rostro no disponible.")
-    return faces[face_id - 1]
-
-def swap_faces(source_image, source_face_index, destination_image, destination_face_index):
-    faces = sort_faces(app.get(source_image))
-    source_face = get_face(faces, source_face_index)
-    res_faces = sort_faces(app.get(destination_image))
-    if destination_face_index > len(res_faces) or destination_face_index < 1:
-        raise ValueError("Índice de rostro de destino no válido.")
-    res_face = get_face(res_faces, destination_face_index)
-    result = swapper.get(destination_image, res_face, source_face, paste_back=True)
-    return result
-
-def generate_image(prompt, width, height, seed, model_name):
+def enhance_prompt(text, client=client):
+    if not client:
+        return text[:200]
     try:
-
-
-
-
-
-            width=width,
-            model=model_name,
-            seed=seed
-    )
-        return image, seed
+        enhanced = client.text_generation(
+            "Generate a photorealistic, detailed txt2img prompt: " + text,
+            model="mistralai/Mixtral-8x7B-Instruct-v0.1",)
+        return enhanced[:200]
+
     except Exception as e:
-        st.
-        return
-
-def display_gallery():
-    st.header("Galería de Imágenes Guardadas")
-    images = list_saved_images()
-    if images:
-        cols = st.columns(8)
-        for i, image_file in enumerate(images):
-            with cols[i % 8]:
-                st.image(str(image_file), caption=image_file.name, use_column_width=True)
-                prompt = get_prompt_for_image(image_file.name)
-                st.write(prompt[:100])
-                if st.button(f"FaceSwap", key=f"select_{i}_{image_file.name}"):
-                    st.session_state['generated_image_path'] = str(image_file)
-                    st.success("Imagen seleccionada")
-                if st.button(f"Borrar", key=f"delete_{i}_{image_file.name}"):
-                    if image_file.exists():
-                        os.remove(image_file)
-                        st.success("Imagen borrada")
-                        display_gallery()
-
-def save_prompt(image_name, prompt):
-    with open(DATA_PATH / "prompts.txt", "a") as f:
-        f.write(f"{image_name}: {prompt[:100]}\n")
+        st.warning(f"Prompt enhancement error: {e}")
+        return text[:200]

-def
-
-
-        "With this text, generate a descriptive and photorealistic txt2img prompt in English in 200 characters maximum: " + text,
-        model="mistralai/Mixtral-8x7B-v0.1",
-        max_length=200
-    )
-        return enhanced[:100]
-    except:
-        return text[:100]
+def save_prompt(image_name, enhanced_prompt):
+    with open(DATA_PATH / "prompts.txt", "a") as f:
+        f.write(f"{image_name}: {enhanced_prompt}\n")

 def generate_variations(prompt, num_variants=8, use_enhanced=True):
     instructions = [
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
+        "Photorealistic description for txt2img prompt: ",
+        "Creative, realistic text-to-image prompt: ",
+        "Descriptive, true-to-life txt2img prompt: ",
+        "Naturalistic scene with detailed prompt: ",
+        "Realistic, elegant txt2img prompt: ",
+        "Visually dynamic, hyperrealistic prompt: ",
+        "Cinematic txt2img with hyperrealistic elements: ",
+        "Lifelike txt2img, focusing on photorealistic depth: "
     ]
-
     if use_enhanced:
-        prompts = []
-
-            instruction = instructions[i % len(instructions)]
-            enhanced_prompt = enhance_prompt(f"{instruction}{prompt}")
-            prompts.append(enhanced_prompt)
-    else:
+        prompts = [enhance_prompt(f"{instructions[i % len(instructions)]}{prompt}") for i in range(num_variants)]
+    else:
         prompts = [prompt] * num_variants
     return prompts

+def generate_image(prompt, width, height, seed, model_name, client=client):
+    if not client:
+        st.error("No Hugging Face client available")
+        return None, seed, None
+
+    try:
+        with st.spinner("Generando imagen..."):
+            seed = int(seed) if seed != -1 else random.randint(0, AppConfig.MAX_SEED)
+            enhanced_prompt = enhance_prompt(prompt)
+            image = client.text_to_image(
+                prompt=enhanced_prompt,
+                height=height,
+                width=width,
+                model=model_name,
+                seed=seed
+            )
+            return image, seed, enhanced_prompt
+    except Exception as e:
+        st.error(f"Image generation error: {e}")
+        return None, seed, None
+
 def gen(prompts, width, height, model_name, num_variants=8):
     images = []
     seeds = []
-
     while len(seeds) < num_variants:
         seed = random.randint(0, MAX_SEED)
-        if seed not in seeds:
+        if seed not in seeds:
             seeds.append(seed)

     for i in range(num_variants):
         current_prompt = prompts[i] if len(prompts) > i else prompts[-1]
         with st.spinner(f"Generando imagen {i+1}/{num_variants}"):
-            image, used_seed = generate_image(current_prompt, width, height, seeds[i], model_name)
+            image, used_seed, enhanced_prompt = generate_image(current_prompt, width, height, seeds[i], model_name)
         if image:
             image_path = DATA_PATH / f"generated_image_{used_seed}.jpg"
             image.save(image_path)
-            save_prompt(f"generated_image_{used_seed}.jpg",
-            images.append(str(image_path))
+            save_prompt(f"generated_image_{used_seed}.jpg", enhanced_prompt)
+            images.append((str(image_path), enhanced_prompt))
         st.success(f"Imagen {i+1} generada")
-
     return images

 def get_prompt_for_image(image_name):
@@ -178,20 +120,41 @@ def get_prompt_for_image(image_name):
     with open(DATA_PATH / "prompts.txt", "r") as f:
         for line in f:
             if line.startswith(image_name):
-
-
+                original_prompt = line.split(": ", 1)[1].strip()
+                image_path = DATA_PATH / image_name
+
+                if image_path.exists():
+                    return original_prompt
+    except FileNotFoundError:
         return "No hay prompt asociado"
     return "No hay prompt asociado"
+
+def display_gallery():
+    st.header("Galería de Imágenes Guardadas")
+    images = list_saved_images()
+
+    if images:
+        cols = st.columns(4)
+        for i, image_file in enumerate(images):
+            with cols[i % 4]:
+                st.image(str(image_file), use_column_width=True)
+                prompt = get_prompt_for_image(image_file.name)
+                st.caption(prompt[:250])
+                if st.button(f"Borrar", key=f"delete_{i}_{image_file}"):
+                    if image_file.exists():
+                        os.remove(image_file)
+                        st.success("Imagen borrada")
+                        st.rerun()

 def login_form():
     st.title("Iniciar Sesión")
     username = st.text_input("Usuario", value="admin")
-    password = st.text_input("Contraseña", value="", type="password")
+    password = st.text_input("Contraseña", value="flux3x", type="password")
     if st.button("Iniciar Sesión"):
         if authenticate_user(username, password):
             st.session_state['authenticated'] = True
             st.success("Autenticación exitosa.")
-        else:
+        else:
             st.error("Credenciales incorrectas. Intenta de nuevo.")

 def upload_image_to_gallery():
@@ -203,19 +166,20 @@ def upload_image_to_gallery():
         save_prompt(uploaded_image.name, "uploaded by user")
         st.sidebar.success(f"Imagen subida: {image_path}")

-app, swapper = prepare_face_app()
-
 def main():
-    if 'authenticated' not in st.session_state or not st.session_state['authenticated']:
+    if 'authenticated' not in st.session_state or not st.session_state['authenticated']:
        login_form()
        return
-
-    st.title("Flux +Upscale +Prompt Enhancer
-
+
+    st.title("Flux +Upscale +Prompt Enhancer")
+
+    if not client:
+        st.error("No se pudo establecer conexión con Hugging Face. Verifique sus tokens.")
+        return
+
     prompt = st.sidebar.text_area("Descripción de la imagen", height=150, max_chars=500)
     format_option = st.sidebar.selectbox("Formato", ["9:16", "16:9", "1:1"])
     model_option = st.sidebar.selectbox("Modelo", ["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-dev"])
-    upscale_checkbox = st.sidebar.checkbox("Escalar imagen")
     prompt_enhance = st.sidebar.checkbox("Mejorar Prompt", True)
     num_variants = st.sidebar.slider("Número de imágenes", 1, 8, 8)
     width, height = (720, 1280) if format_option == "9:16" else (1280, 720) if format_option == "16:9" else (1280, 1280)
@@ -223,16 +187,17 @@ def main():
     if prompt:
         prompts = generate_variations(prompt, num_variants=num_variants, use_enhanced=prompt_enhance)
         if st.sidebar.button("Generar Imágenes"):
-
-
-            if generated_image_path and upscale_checkbox:
-                upscale_factor = st.sidebar.slider("Factor de Escalado", 1, 4, 2)
-                improved_image = get_upscale_finegrain(prompt, generated_image_path, upscale_factor)
-                if improved_image:
-                    st.image(improved_image, caption="Imagen Escalada", use_column_width=True)
+            generated_images = gen(prompts, width, height, model_option, num_variants)

+            st.header("Imágenes Generadas")
+            cols = st.columns(4)
+            for i, (image_path, image_prompt) in enumerate(generated_images):
+                with cols[i % 4]:
+                    st.image(image_path, use_column_width=True)
+                    st.caption(image_prompt)
+
     upload_image_to_gallery()
     display_gallery()

-if __name__ == "__main__":
+if __name__ == "__main__":
     main()
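
The new version keeps reading login credentials from config.yaml, but that file is not part of the diff. As a minimal sketch, assuming the file holds only the two keys that authenticate_user() reads (the placeholder values below are illustrative, not the Space's real credentials):

# Hypothetical config.yaml setup for local testing; authenticate_user() only
# looks at credentials["username"] and credentials["password"].
import yaml

with open("config.yaml", "w") as f:
    yaml.safe_dump({"username": "admin", "password": "change-me"}, f)

with open("config.yaml", "r") as f:
    credentials = yaml.safe_load(f)

assert set(credentials) == {"username", "password"}

This complements the commit's handling of the other secret: the new code requires HF_TOKEN in the environment and stops the app with st.stop() when it is missing.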
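For a quick check of the new inference path outside Streamlit, the sketch below mirrors the calls the updated file makes: InferenceClient(token=...), text_generation() against Mixtral for prompt enhancement, and text_to_image() against FLUX.1-schnell. Model names and parameters are taken from the diff; the test prompt, seed range, and output filename are arbitrary choices for illustration.

# Standalone smoke test (assumes HF_TOKEN is exported and huggingface_hub is installed).
import os
import random
from huggingface_hub import InferenceClient

client = InferenceClient(token=os.environ["HF_TOKEN"])

# Prompt enhancement, as in enhance_prompt(): returns the generated text as a string.
enhanced = client.text_generation(
    "Generate a photorealistic, detailed txt2img prompt: a lighthouse at dawn",
    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
)[:200]

# Image generation, as in generate_image(): returns a PIL image.
image = client.text_to_image(
    prompt=enhanced,
    width=1280,
    height=720,
    model="black-forest-labs/FLUX.1-schnell",
    seed=random.randint(0, 1_000_000),
)
image.save("smoke_test.jpg")
print("Saved smoke_test.jpg using prompt:", enhanced)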