salomonsky committed
Commit 9ce8f90 • 1 Parent(s): 812aaeb
Update app.py
app.py CHANGED
@@ -44,7 +44,6 @@ def get_upscale_finegrain(prompt, img_path, upscale_factor):
         )
         return result[1] if isinstance(result, list) and len(result) > 1 else None
     except Exception as e:
-        st.error(f"Error en el escalado: {e}")
         return None
 
 async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
@@ -76,7 +75,7 @@ async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_fac
         upscale_image = Image.open(upscale_image_path)
         upscale_image.save(DATA_PATH / f"upscale_image_{seed}.jpg", format="JPEG")
         progress_bar.progress(100)
-        image_path.unlink()
+        image_path.unlink()
         return [str(DATA_PATH / f"upscale_image_{seed}.jpg"), str(prompt_file_path)]
     else:
         progress_bar.empty()
@@ -93,8 +92,7 @@ async def improve_prompt(prompt):
         improved_text = response['generated_text'].strip() if 'generated_text' in response else response.strip()
         return improved_text
     except Exception as e:
-
-        return ""
+        return f"Error mejorando el prompt: {e}"
 
 def get_storage():
     files = [{"name": str(file.resolve()), "size": file.stat().st_size,}
@@ -111,10 +109,20 @@ def run_gen():
     loop = asyncio.new_event_loop()
     asyncio.set_event_loop(loop)
     prompt_to_use = st.session_state.get('improved_prompt', prompt)
-
-
+    return loop.run_until_complete(gen(prompt_to_use, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora))
+
+def delete_image(image_path):
+    try:
+        if Path(image_path).exists():
+            Path(image_path).unlink()
+            st.success(f"Imagen {image_path} borrada.")
+        else:
+            st.error("El archivo de imagen no existe.")
+    except Exception as e:
+        st.error(f"Error al borrar la imagen: {e}")
 
 st.set_page_config(layout="wide")
+st.title("Generador de Imágenes FLUX")
 prompt = st.sidebar.text_input("Descripción de la imagen")
 basemodel = st.sidebar.selectbox("Modelo Base", ["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-DEV"])
 lora_model = st.sidebar.selectbox("LORA Realismo", ["Shakker-Labs/FLUX.1-dev-LoRA-add-details", "XLabs-AI/flux-RealismLora"])
@@ -141,12 +149,12 @@ if st.sidebar.button("Mejorar prompt"):
 
 if st.sidebar.button("Generar Imagen"):
     with st.spinner("Generando imagen..."):
-        result = run_gen()
-        image_paths = result[0]
+        result = run_gen()
+        image_paths = result[0]
         prompt_file = result[1]
 
-        st.write(f"Image paths: {image_paths}")
-
+        st.write(f"Image paths: {image_paths}")
+
         if image_paths:
             if Path(image_paths).exists():
                 st.image(image_paths, caption="Imagen Generada")
@@ -161,13 +169,17 @@ if st.sidebar.button("Generar Imagen"):
 
 files, usage = get_storage()
 st.text(usage)
-
 cols = st.columns(6)
 prompts = get_prompts()
+
 for idx, file in enumerate(files):
     with cols[idx % 6]:
         image = Image.open(file)
         prompt_file = prompts.get(Path(file).stem.replace("image_", ""), None)
         prompt_text = Path(prompt_file).read_text() if prompt_file else "No disponible"
         st.image(image, caption=f"Imagen {idx+1}")
-        st.write(f"Prompt: {prompt_text}")
+        st.write(f"Prompt: {prompt_text}")
+        if st.button(f"Borrar Imagen {idx+1}", key=f"delete_{idx}"):
+            os.remove(file)
+            if prompt_file:
+                os.remove(prompt_file)
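
Note: the new per-image buttons call os.remove directly rather than the delete_image helper introduced in the same commit. A minimal sketch of routing the button through that helper, assuming the existing files, cols, prompts and delete_image names from app.py and a Streamlit version that provides st.rerun (this is an illustration, not part of the commit):

# Hypothetical variant: use the delete_image helper so prompt files are
# removed with the same success/error reporting, then rerun the script so
# the gallery refreshes without the deleted entries.
import streamlit as st
from pathlib import Path

for idx, file in enumerate(files):            # files, cols, prompts defined earlier in app.py
    with cols[idx % 6]:
        st.image(file, caption=f"Imagen {idx+1}")
        if st.button(f"Borrar Imagen {idx+1}", key=f"delete_{idx}"):
            delete_image(file)                # helper shows st.success / st.error
            prompt_file = prompts.get(Path(file).stem.replace("image_", ""), None)
            if prompt_file:
                delete_image(prompt_file)     # also remove the paired prompt file
            st.rerun()                        # assumes a Streamlit release with st.rerun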