Update app.py
app.py CHANGED
@@ -275,6 +275,40 @@ def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denois
     else:
         return images[0], mask_gray
 
+
+@app.route('/tryon-v2', methods=['POST'])
+def tryon_v2():
+
+    data = request.json
+    human_image_data = data['human_image']
+    garment_image_data = data['garment_image']
+
+    # Process images (base64 or URL)
+    human_image = process_image(human_image_data)
+    garment_image = process_image(garment_image_data)
+
+    description = data.get('description')
+    use_auto_mask = data.get('use_auto_mask', True)
+    use_auto_crop = data.get('use_auto_crop', False)
+    denoise_steps = int(data.get('denoise_steps', 30))
+    seed = int(data.get('seed', random.randint(0, 9999999)))
+    categorie = data.get('categorie', 'upper_body')
+
+    # Check whether 'mask_image' is present in the request data
+    mask_image = None
+    if 'mask_image' in data:
+        mask_image_data = data['mask_image']
+        mask_image = process_image(mask_image_data)
+
+    human_dict = {
+        'background': human_image,
+        'layers': [mask_image] if not use_auto_mask else None,
+        'composite': None
+    }
+    output_image, mask_image = start_tryon(human_dict, garment_image, description, use_auto_mask, use_auto_crop, denoise_steps, seed, categorie)
+    return jsonify({
+        'image_id': save_image(output_image)
+    })
 
 def clear_gpu_memory():
     torch.cuda.empty_cache()
@@ -315,87 +349,7 @@ def tryon():
         'mask_image': mask_base64
     })
 
-@app.route('/tryon-v2', methods=['POST'])
-def tryon_v2():
-
-    data = request.json
-    human_image_data = data['human_image']
-    garment_image_data = data['garment_image']
-
-    # Process images (base64 or URL)
-    human_image = process_image(human_image_data)
-    garment_image = process_image(garment_image_data)
-
-    description = data.get('description')
-    use_auto_mask = data.get('use_auto_mask', True)
-    use_auto_crop = data.get('use_auto_crop', False)
-    denoise_steps = int(data.get('denoise_steps', 30))
-    seed = int(data.get('seed', random.randint(0, 9999999)))
-    categorie = data.get('categorie', 'upper_body')
-
-    # Check whether 'mask_image' is present in the request data
-    mask_image = None
-    if 'mask_image' in data:
-        mask_image_data = data['mask_image']
-        mask_image = process_image(mask_image_data)
-
-    human_dict = {
-        'background': human_image,
-        'layers': [mask_image] if not use_auto_mask else None,
-        'composite': None
-    }
-    output_image, mask_image = start_tryon(human_dict, garment_image, description, use_auto_mask, use_auto_crop, denoise_steps, seed, categorie)
-    return jsonify({
-        'image_id': save_image(output_image)
-    })
-
-@spaces.GPU
-def generate_mask(human_img, categorie='upper_body'):
-    device = "cuda"
-    openpose_model.preprocessor.body_estimation.model.to(device)
-    pipe.to(device)
-
-    try:
-        # Resize the image for the model
-        human_img_resized = human_img.convert("RGB").resize((384, 512))
-
-        # Generate the keypoints and the mask
-        keypoints = openpose_model(human_img_resized)
-        model_parse, _ = parsing_model(human_img_resized)
-        mask, _ = get_mask_location('hd', categorie, model_parse, keypoints)
-
-        # Resize the mask back to the original image size
-        mask_resized = mask.resize(human_img.size)
-
-        return mask_resized
-    except Exception as e:
-        logging.error(f"Error generating mask: {e}")
-        raise e
-
-
-@app.route('/generate_mask', methods=['POST'])
-def generate_mask_api():
-    try:
-        # Get the image data from the request
-        data = request.json
-        base64_image = data.get('human_image')
-        categorie = data.get('categorie', 'upper_body')
-
-        # Decode the image from base64
-        human_img = process_image(base64_image)
-
-        # Call the function that generates the mask
-        mask_resized = generate_mask(human_img, categorie)
-
-        # Encode the mask as base64 for the response
-        mask_base64 = encode_image_to_base64(mask_resized)
-
-        return jsonify({
-            'mask_image': mask_base64
-        }), 200
-    except Exception as e:
-        logging.error(f"Error generating mask: {e}")
-        return jsonify({'error': str(e)}), 500
+
 
 # Route to retrieve the generated image
 @app.route('/api/get_image/<image_id>', methods=['GET'])
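For reference, a minimal client-side sketch of how the relocated /tryon-v2 route could be exercised, using only the JSON fields read in the diff above (human_image, garment_image, description, use_auto_mask, use_auto_crop, denoise_steps, seed, categorie, and the returned image_id). The base URL, the file names, and the to_base64 helper are illustrative assumptions, not part of this commit, and the final GET assumes /api/get_image/<image_id> returns the generated image as raw bytes.

import base64
import requests

BASE_URL = "http://localhost:7860"  # assumption: wherever this Flask app is served

def to_base64(path):
    # Helper (not part of app.py): encode a local file as a base64 string,
    # one of the two input formats the route's comment mentions (base64 or URL).
    with open(path, "rb") as f:
        return base64.b64encode(f.read()).decode("utf-8")

payload = {
    "human_image": to_base64("person.jpg"),    # required by tryon_v2
    "garment_image": to_base64("shirt.jpg"),   # required by tryon_v2
    "description": "short-sleeve cotton shirt",
    "use_auto_mask": True,                     # or False together with a "mask_image" field
    "use_auto_crop": False,
    "denoise_steps": 30,
    "seed": 42,
    "categorie": "upper_body",                 # spelled as in app.py
}

resp = requests.post(f"{BASE_URL}/tryon-v2", json=payload, timeout=300)
resp.raise_for_status()
image_id = resp.json()["image_id"]

# Fetch the result through the GET route kept at the end of the diff
# (assuming it returns the generated image as raw bytes).
result = requests.get(f"{BASE_URL}/api/get_image/{image_id}", timeout=60)
result.raise_for_status()
with open("tryon_result.png", "wb") as f:
    f.write(result.content)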