import os

# Install runtime dependencies at startup (a common pattern for hosted demo scripts).
os.system("pip install dlib")
os.system("pip install gradio==2.5.3")

import sys

import face_detection
import PIL
from PIL import Image, ImageOps, ImageFile
import numpy as np

import torch

# Inference only: disable gradient tracking globally, then load the
# TorchScript model and put it in eval mode.
torch.set_grad_enabled(False)
model = torch.jit.load('u2net_bce_itr_18000_train_3.891670_tar_0.553700_512x_460x.jit.pt')
model.eval()

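# The checkpoint filename suggests a U-2-Net variant exported to TorchScript
# after 18,000 training iterations at roughly 512x460 resolution (a reading of
# the "itr_18000" / "512x_460x" tokens; the exact training setup is not
# documented here). torch.jit.load raises if the file is absent.
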
def normPRED(d):
    # Min-max normalize predictions into the [0, 1] range.
    ma = np.max(d)
    mi = np.min(d)
    dn = (d - mi) / (ma - mi)
    return dn

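# Caveat: if the prediction is constant, (ma - mi) is zero and normPRED
# divides by zero; a safe variation (not part of the original pipeline) would
# divide by (ma - mi + 1e-8).
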
def array_to_image(array_in):
    # Scale a (1, C, H, W) prediction to [0, 255] and convert it to a PIL image.
    array_in = normPRED(array_in)
    array_in = np.squeeze(255.0 * array_in)
    array_in = np.transpose(array_in, (1, 2, 0))
    im = Image.fromarray(array_in.astype(np.uint8))
    return im

def image_as_array(image_in):
    # Convert a PIL image into a normalized (1, 3, H, W) float array using
    # the standard ImageNet per-channel mean/std.
    image_in = np.array(image_in, np.float32)
    tmpImg = np.zeros((image_in.shape[0], image_in.shape[1], 3))
    image_in = image_in / np.max(image_in)
    if image_in.shape[2] == 1:
        # Single-channel input: replicate it across R, G and B.
        tmpImg[:, :, 0] = (image_in[:, :, 0] - 0.485) / 0.229
        tmpImg[:, :, 1] = (image_in[:, :, 0] - 0.485) / 0.229
        tmpImg[:, :, 2] = (image_in[:, :, 0] - 0.485) / 0.229
    else:
        tmpImg[:, :, 0] = (image_in[:, :, 0] - 0.485) / 0.229
        tmpImg[:, :, 1] = (image_in[:, :, 1] - 0.456) / 0.224
        tmpImg[:, :, 2] = (image_in[:, :, 2] - 0.406) / 0.225

    # HWC -> CHW, then add a batch dimension.
    tmpImg = tmpImg.transpose((2, 0, 1))
    image_out = np.expand_dims(tmpImg, 0)
    return image_out

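# Note: the shape[2] == 1 branch assumes a 3-D array; a 2-D grayscale array
# would raise an IndexError before reaching it. In practice Gradio hands this
# app a PIL image, which np.array() turns into an (H, W, 3) or (H, W, 4)
# array, so the single-channel branch is rarely, if ever, taken.
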
def find_aligned_face(image_in, size=512):
    aligned_image, n_faces, quad = face_detection.align(image_in, face_index=0, output_size=size)
    return aligned_image, n_faces, quad

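# Assumption: face_detection is the repo's local helper module (built on the
# dlib landmark detector installed above), not a PyPI package of the same
# name; its align() is used here as returning the aligned crop, the number of
# detected faces, and the alignment quad.
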
def align_first_face(image_in, size=512):
    aligned_image, n_faces, quad = find_aligned_face(image_in, size=size)
    if n_faces == 0:
        # No face detected: fall back to the full image, honoring the EXIF
        # orientation tag when possible.
        try:
            image_in = ImageOps.exif_transpose(image_in)
        except Exception:
            print("exif problem, not rotating")
        image_in = image_in.resize((size, size))
        im_array = image_as_array(image_in)
    else:
        im_array = image_as_array(aligned_image)

    return im_array

def img_concat_h(im1, im2):
    # Paste the two images side by side on a new canvas.
    dst = Image.new('RGB', (im1.width + im2.width, im1.height))
    dst.paste(im1, (0, 0))
    dst.paste(im2, (im1.width, 0))
    return dst

import gradio as gr

def face2doll(
    img: Image.Image,
    size: int
) -> Image.Image:
    # Align the face (or fall back to the resized image), run the model, and
    # return the aligned input and the stylized result side by side.
    aligned_img = align_first_face(img, size=size)
    if aligned_img is None:
        output = None
    else:
        input_tensor = torch.Tensor(aligned_img)
        results = model(input_tensor)
        d2 = array_to_image(results[1].detach().numpy())
        output = img_concat_h(array_to_image(aligned_img), d2)
        del results

    return output

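# Minimal usage sketch outside of Gradio (assumes a local 'face.jpg' exists):
#
#   result = face2doll(Image.open('face.jpg'), 512)
#   if result is not None:
#       result.save('face_doll.jpg')
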
def inference(img):
    out = face2doll(img, 512)
    return out

title = "Face2Doll U2Net"
description = "Style-transfer a face into that of a \"Doll\". Upload an image with a face, or click one of the examples below. If no face is detected, an image will still be created. Faces with glasses do not seem to yield good results."
article = "<hr><p style='text-align: center'>See the <a href='https://github.com/Norod/U-2-Net-StyleTransfer' target='_blank'>Github Repo</a></p><p style='text-align: center'>samples: <img src='https://hf.space/gradioiframe/Norod78/Face2Doll/file/Sample00001.jpg' alt='Sample00001'/><img src='https://hf.space/gradioiframe/Norod78/Face2Doll/file/Sample00002.jpg' alt='Sample00002'/><img src='https://hf.space/gradioiframe/Norod78/Face2Doll/file/Sample00003.jpg' alt='Sample00003'/><img src='https://hf.space/gradioiframe/Norod78/Face2Doll/file/Sample00004.jpg' alt='Sample00004'/><img src='https://hf.space/gradioiframe/Norod78/Face2Doll/file/Sample00005.jpg' alt='Sample00005'/></p><p>The \"Face2Doll (U2Net)\" model was trained by <a href='https://linktr.ee/Norod78' target='_blank'>Doron Adler</a></p>"

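# The gr.inputs / gr.outputs namespaces and the boolean enable_queue /
# allow_flagging arguments below are Gradio 2.x APIs; they were removed or
# changed in Gradio 3+, which is why gradio==2.5.3 is pinned at the top.
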
examples = [['Example00001.jpg'], ['Example00002.jpg'], ['Example00003.jpg'], ['Example00004.jpg'], ['Example00005.jpg'], ['Example00006.jpg']]

gr.Interface(
    inference,
    gr.inputs.Image(type="pil", label="Input"),
    gr.outputs.Image(type="pil", label="Output"),
    title=title,
    description=description,
    article=article,
    examples=examples,
    enable_queue=True,
    allow_flagging=False
).launch()