Ahsen Khaliq committed · Commit a6087ef · 1 Parent(s): c0f3a3b

Update app.py
app.py CHANGED
@@ -4,18 +4,125 @@ os.system("wget https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/mo
 os.system("wget https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/models/ParseNet-latest.pth -P .")
 os.system("wget https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/models/rrdb_realesrnet_psnr.pth -P .")
 
-import random
 import gradio as gr
-from PIL import Image
-import torch
 torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/a/ab/Abraham_Lincoln_O-77_matte_collodion_print.jpg/1024px-Abraham_Lincoln_O-77_matte_collodion_print.jpg', 'lincoln.jpg')
 torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/5/50/Albert_Einstein_%28Nobel%29.png', 'einstein.png')
+
+'''
+@paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)
+@author: yangxy ([email protected])
+'''
+import os
 import cv2
 import glob
+import time
+import argparse
 import numpy as np
-
+from PIL import Image
+import __init_paths
+from face_detect.retinaface_detection import RetinaFaceDetection
+from face_parse.face_parsing import FaceParse
+from face_model.face_gan import FaceGAN
+from sr_model.real_esrnet import RealESRNet
+from align_faces import warp_and_crop_face, get_reference_facial_points
+
+class FaceEnhancement(object):
+    def __init__(self, base_dir='./', size=512, model=None, use_sr=True, sr_model=None, channel_multiplier=2, narrow=1, key=None, device='cuda'):
+        self.facedetector = RetinaFaceDetection(base_dir, device)
+        self.facegan = FaceGAN(base_dir, size, model, channel_multiplier, narrow, key, device=device)
+        self.srmodel = RealESRNet(base_dir, sr_model, device=device)
+        self.faceparser = FaceParse(base_dir, device=device)
+        self.use_sr = use_sr
+        self.size = size
+        self.threshold = 0.9
+        # the mask for pasting restored faces back
+        self.mask = np.zeros((512, 512), np.float32)
+        cv2.rectangle(self.mask, (26, 26), (486, 486), (1, 1, 1), -1, cv2.LINE_AA)
+        self.mask = cv2.GaussianBlur(self.mask, (101, 101), 11)
+        self.mask = cv2.GaussianBlur(self.mask, (101, 101), 11)
+        self.kernel = np.array((
+            [0.0625, 0.125, 0.0625],
+            [0.125, 0.25, 0.125],
+            [0.0625, 0.125, 0.0625]), dtype="float32")
+        # get the reference 5 landmarks position in the crop settings
+        default_square = True
+        inner_padding_factor = 0.25
+        outer_padding = (0, 0)
+        self.reference_5pts = get_reference_facial_points(
+            (self.size, self.size), inner_padding_factor, outer_padding, default_square)
+    def mask_postprocess(self, mask, thres=20):
+        mask[:thres, :] = 0; mask[-thres:, :] = 0
+        mask[:, :thres] = 0; mask[:, -thres:] = 0
+        mask = cv2.GaussianBlur(mask, (101, 101), 11)
+        mask = cv2.GaussianBlur(mask, (101, 101), 11)
+        return mask.astype(np.float32)
+    def process(self, img):
+        if self.use_sr:
+            img_sr = self.srmodel.process(img)
+            if img_sr is not None:
+                img = cv2.resize(img, img_sr.shape[:2][::-1])
+        facebs, landms = self.facedetector.detect(img)
+
+        orig_faces, enhanced_faces = [], []
+        height, width = img.shape[:2]
+        full_mask = np.zeros((height, width), dtype=np.float32)
+        full_img = np.zeros(img.shape, dtype=np.uint8)
+        for i, (faceb, facial5points) in enumerate(zip(facebs, landms)):
+            if faceb[4] < self.threshold: continue
+            fh, fw = (faceb[3] - faceb[1]), (faceb[2] - faceb[0])
+            facial5points = np.reshape(facial5points, (2, 5))
+            of, tfm_inv = warp_and_crop_face(img, facial5points, reference_pts=self.reference_5pts, crop_size=(self.size, self.size))
+
+            # enhance the face
+            ef = self.facegan.process(of)
+
+            orig_faces.append(of)
+            enhanced_faces.append(ef)
+
+            #tmp_mask = self.mask
+            tmp_mask = self.mask_postprocess(self.faceparser.process(ef)[0] / 255.)
+            tmp_mask = cv2.resize(tmp_mask, ef.shape[:2])
+            tmp_mask = cv2.warpAffine(tmp_mask, tfm_inv, (width, height), flags=3)
+            if min(fh, fw) < 100:  # gaussian filter for small faces
+                ef = cv2.filter2D(ef, -1, self.kernel)
+
+            tmp_img = cv2.warpAffine(ef, tfm_inv, (width, height), flags=3)
+            mask = tmp_mask - full_mask
+            full_mask[np.where(mask > 0)] = tmp_mask[np.where(mask > 0)]
+            full_img[np.where(mask > 0)] = tmp_img[np.where(mask > 0)]
+        full_mask = full_mask[:, :, np.newaxis]
+        if self.use_sr and img_sr is not None:
+            img = cv2.convertScaleAbs(img_sr * (1 - full_mask) + full_img * full_mask)
+        else:
+            img = cv2.convertScaleAbs(img * (1 - full_mask) + full_img * full_mask)
+        return img, orig_faces, enhanced_faces
+
+
+
+model = "GPEN-BFR-512"
+
+key = None
+size = 512
+channel_multiplier = 2
+narrow = 1
+use_sr = False
+use_cuda = False
+sr_model = 'rrdb_realesrnet_psnr'
+indir = "."
+outdir = "."
+
+
+faceenhancer = FaceEnhancement(size=size, model=model, use_sr=use_sr, sr_model=sr_model, channel_multiplier=channel_multiplier, narrow=narrow, key=key, device='cuda' if use_cuda else 'cpu')
+
+
+
+
 
 def inference(img):
+    im = cv2.imread(img, cv2.IMREAD_COLOR)
+    img, orig_faces, enhanced_faces = faceenhancer.process(im)
+
+    return enhanced_faces[0]
     os.system("python face_enhancement.py --model GPEN-BFR-512 --size 512 --channel_multiplier 2 --narrow 1 --use_sr --indir examples/imgs --outdir examples/outs-BFR2")
     return "examples/outs-BFR2/"
 
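The hunk ends at the new inference function, so the Gradio wiring that presumably follows further down in app.py is not part of the change shown above. As a minimal sketch of how inference could be exposed as the Space UI (the gradio_predict wrapper, the Interface arguments, and the BGR-to-RGB conversion are illustrative assumptions, not code from this commit):

# Sketch only: assumes it sits at the end of app.py, where cv2, gradio (gr),
# and the inference() defined in the diff above are already available.
def gradio_predict(path):
    out = inference(path)                        # BGR uint8 array from OpenCV
    return cv2.cvtColor(out, cv2.COLOR_BGR2RGB)  # Gradio displays RGB

gr.Interface(
    fn=gradio_predict,
    inputs=gr.Image(type="filepath"),
    outputs=gr.Image(),
    examples=[["lincoln.jpg"], ["einstein.png"]],
    title="GPEN: Blind Face Restoration",
).launch()

Note that inference returns only the first enhanced face crop (enhanced_faces[0]); returning the blended img that process() also produces would display the full restored photo instead.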