victorisgeek committed
Commit 0c1eceb
Parent: 45fce2a

Upload face_enhancer.py

Files changed (1): face_enhancer.py (+301, -0)
face_enhancer.py ADDED
@@ -0,0 +1,301 @@
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
from time import sleep
import cv2
import numpy
import onnxruntime

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, process_manager, logger, wording
from facefusion.face_analyser import get_many_faces, clear_face_analyser, find_similar_faces, get_one_face
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, clear_face_occluder
from facefusion.face_helper import warp_face_by_face_landmark_5, paste_back
from facefusion.execution import apply_execution_provider_options
from facefusion.content_analyser import clear_content_analyser
from facefusion.face_store import get_reference_faces
from facefusion.normalizer import normalize_output_path
from facefusion.thread_helper import thread_lock, thread_semaphore
from facefusion.typing import Face, VisionFrame, UpdateProgress, ProcessMode, ModelSet, OptionsWithModel, QueuePayload
from facefusion.common_helper import create_metavar
from facefusion.filesystem import is_file, is_image, is_video, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FaceEnhancerInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

FRAME_PROCESSOR = None
NAME = __name__.upper()
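# Model registry: download URL, local cache path, face alignment template and
# expected input size for every supported enhancer model.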
MODELS : ModelSet =\
{
    'codeformer':
    {
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/codeformer.onnx',
        'path': resolve_relative_path('../.assets/models/codeformer.onnx'),
        'template': 'ffhq_512',
        'size': (512, 512)
    },
    'gfpgan_1.2':
    {
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.2.onnx',
        'path': resolve_relative_path('../.assets/models/gfpgan_1.2.onnx'),
        'template': 'ffhq_512',
        'size': (512, 512)
    },
    'gfpgan_1.3':
    {
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.3.onnx',
        'path': resolve_relative_path('../.assets/models/gfpgan_1.3.onnx'),
        'template': 'ffhq_512',
        'size': (512, 512)
    },
    'gfpgan_1.4':
    {
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.4.onnx',
        'path': resolve_relative_path('../.assets/models/gfpgan_1.4.onnx'),
        'template': 'ffhq_512',
        'size': (512, 512)
    },
    'gpen_bfr_256':
    {
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_256.onnx',
        'path': resolve_relative_path('../.assets/models/gpen_bfr_256.onnx'),
        'template': 'arcface_128_v2',
        'size': (256, 256)
    },
    'gpen_bfr_512':
    {
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_512.onnx',
        'path': resolve_relative_path('../.assets/models/gpen_bfr_512.onnx'),
        'template': 'ffhq_512',
        'size': (512, 512)
    },
    'gpen_bfr_1024':
    {
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_1024.onnx',
        'path': resolve_relative_path('../.assets/models/gpen_bfr_1024.onnx'),
        'template': 'ffhq_512',
        'size': (1024, 1024)
    },
    'gpen_bfr_2048':
    {
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_2048.onnx',
        'path': resolve_relative_path('../.assets/models/gpen_bfr_2048.onnx'),
        'template': 'ffhq_512',
        'size': (2048, 2048)
    },
    'restoreformer_plus_plus':
    {
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/restoreformer_plus_plus.onnx',
        'path': resolve_relative_path('../.assets/models/restoreformer_plus_plus.onnx'),
        'template': 'ffhq_512',
        'size': (512, 512)
    }
}
OPTIONS : Optional[OptionsWithModel] = None

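# Lazily build a single ONNX Runtime inference session for the selected model.
# The thread lock guards initialisation, and the loop waits for any pending
# download check to finish before the model file is opened.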
def get_frame_processor() -> Any:
    global FRAME_PROCESSOR

    with thread_lock():
        while process_manager.is_checking():
            sleep(0.5)
        if FRAME_PROCESSOR is None:
            model_path = get_options('model').get('path')
            FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers))
    return FRAME_PROCESSOR


def clear_frame_processor() -> None:
    global FRAME_PROCESSOR

    FRAME_PROCESSOR = None

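# Options are resolved once from the frame-processor globals and then cached;
# set_options assumes the cache has already been populated by get_options.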
def get_options(key : Literal['model']) -> Any:
    global OPTIONS

    if OPTIONS is None:
        OPTIONS =\
        {
            'model': MODELS[frame_processors_globals.face_enhancer_model]
        }
    return OPTIONS.get(key)


def set_options(key : Literal['model'], value : Any) -> None:
    global OPTIONS

    OPTIONS[key] = value


def register_args(program : ArgumentParser) -> None:
    program.add_argument('--face-enhancer-model', help = wording.get('help.face_enhancer_model'), default = config.get_str_value('frame_processors.face_enhancer_model', 'gfpgan_1.4'), choices = frame_processors_choices.face_enhancer_models)
    program.add_argument('--face-enhancer-blend', help = wording.get('help.face_enhancer_blend'), type = int, default = config.get_int_value('frame_processors.face_enhancer_blend', '80'), choices = frame_processors_choices.face_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.face_enhancer_blend_range))


def apply_args(program : ArgumentParser) -> None:
    args = program.parse_args()
    frame_processors_globals.face_enhancer_model = args.face_enhancer_model
    frame_processors_globals.face_enhancer_blend = args.face_enhancer_blend

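# Lifecycle hooks invoked by the frame-processor core: pre_check downloads the
# model when permitted, post_check verifies the download, pre_process validates
# the target and output paths, and post_process releases cached resources
# according to the video memory strategy.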
def pre_check() -> bool:
    download_directory_path = resolve_relative_path('../.assets/models')
    model_url = get_options('model').get('url')
    model_path = get_options('model').get('path')

    if not facefusion.globals.skip_download:
        process_manager.check()
        conditional_download(download_directory_path, [ model_url ])
        process_manager.end()
    return is_file(model_path)


def post_check() -> bool:
    model_url = get_options('model').get('url')
    model_path = get_options('model').get('path')

    if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
        logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
        return False
    if not is_file(model_path):
        logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
        return False
    return True


def pre_process(mode : ProcessMode) -> bool:
    if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
        logger.error(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
        return False
    if mode == 'output' and not normalize_output_path(facefusion.globals.target_path, facefusion.globals.output_path):
        logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
        return False
    return True


def post_process() -> None:
    read_static_image.cache_clear()
    if facefusion.globals.video_memory_strategy == 'strict' or facefusion.globals.video_memory_strategy == 'moderate':
        clear_frame_processor()
    if facefusion.globals.video_memory_strategy == 'strict':
        clear_face_analyser()
        clear_content_analyser()
        clear_face_occluder()

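# Core routine: warp the face crop onto the model template, build a box mask
# (plus an optional occlusion mask), run the enhancer, paste the crop back
# into the frame and blend with the original.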
def enhance_face(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
    model_template = get_options('model').get('template')
    model_size = get_options('model').get('size')
    crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, target_face.landmarks.get('5/68'), model_template, model_size)
    box_mask = create_static_box_mask(crop_vision_frame.shape[:2][::-1], facefusion.globals.face_mask_blur, (0, 0, 0, 0))
    crop_mask_list =\
    [
        box_mask
    ]

    if 'occlusion' in facefusion.globals.face_mask_types:
        occlusion_mask = create_occlusion_mask(crop_vision_frame)
        crop_mask_list.append(occlusion_mask)
    crop_vision_frame = prepare_crop_frame(crop_vision_frame)
    crop_vision_frame = apply_enhance(crop_vision_frame)
    crop_vision_frame = normalize_crop_frame(crop_vision_frame)
    crop_mask = numpy.minimum.reduce(crop_mask_list).clip(0, 1)
    paste_vision_frame = paste_back(temp_vision_frame, crop_vision_frame, crop_mask, affine_matrix)
    temp_vision_frame = blend_frame(temp_vision_frame, paste_vision_frame)
    return temp_vision_frame

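# Feed the prepared crop through the ONNX session. Models that expose a
# 'weight' input (e.g. CodeFormer's fidelity weight) receive a fixed value
# of 1; the semaphore serialises inference across worker threads.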
def apply_enhance(crop_vision_frame : VisionFrame) -> VisionFrame:
    frame_processor = get_frame_processor()
    frame_processor_inputs = {}

    for frame_processor_input in frame_processor.get_inputs():
        if frame_processor_input.name == 'input':
            frame_processor_inputs[frame_processor_input.name] = crop_vision_frame
        if frame_processor_input.name == 'weight':
            weight = numpy.array([ 1 ]).astype(numpy.double)
            frame_processor_inputs[frame_processor_input.name] = weight
    with thread_semaphore():
        crop_vision_frame = frame_processor.run(None, frame_processor_inputs)[0][0]
    return crop_vision_frame

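# Convert the BGR uint8 crop into model input: RGB, scaled to [-1, 1],
# float32, NCHW with a leading batch dimension.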
def prepare_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame:
    crop_vision_frame = crop_vision_frame[:, :, ::-1] / 255.0
    crop_vision_frame = (crop_vision_frame - 0.5) / 0.5
    crop_vision_frame = numpy.expand_dims(crop_vision_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32)
    return crop_vision_frame

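# Invert the preparation step: clamp to [-1, 1], rescale to [0, 255],
# back to HWC uint8 and BGR channel order.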
def normalize_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame:
    crop_vision_frame = numpy.clip(crop_vision_frame, -1, 1)
    crop_vision_frame = (crop_vision_frame + 1) / 2
    crop_vision_frame = crop_vision_frame.transpose(1, 2, 0)
    crop_vision_frame = (crop_vision_frame * 255.0).round()
    crop_vision_frame = crop_vision_frame.astype(numpy.uint8)[:, :, ::-1]
    return crop_vision_frame

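# A face_enhancer_blend of 100 keeps the fully enhanced frame, 0 keeps the
# original; values in between mix the two via cv2.addWeighted.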
def blend_frame(temp_vision_frame : VisionFrame, paste_vision_frame : VisionFrame) -> VisionFrame:
    face_enhancer_blend = 1 - (frame_processors_globals.face_enhancer_blend / 100)
    temp_vision_frame = cv2.addWeighted(temp_vision_frame, face_enhancer_blend, paste_vision_frame, 1 - face_enhancer_blend, 0)
    return temp_vision_frame


def get_reference_frame(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
    return enhance_face(target_face, temp_vision_frame)

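# Dispatch on the face selector mode: enhance every detected face ('many'),
# a single detected face ('one'), or only faces matching the reference
# ('reference').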
def process_frame(inputs : FaceEnhancerInputs) -> VisionFrame:
    reference_faces = inputs.get('reference_faces')
    target_vision_frame = inputs.get('target_vision_frame')

    if facefusion.globals.face_selector_mode == 'many':
        many_faces = get_many_faces(target_vision_frame)
        if many_faces:
            for target_face in many_faces:
                target_vision_frame = enhance_face(target_face, target_vision_frame)
    if facefusion.globals.face_selector_mode == 'one':
        target_face = get_one_face(target_vision_frame)
        if target_face:
            target_vision_frame = enhance_face(target_face, target_vision_frame)
    if facefusion.globals.face_selector_mode == 'reference':
        similar_faces = find_similar_faces(reference_faces, target_vision_frame, facefusion.globals.reference_face_distance)
        if similar_faces:
            for similar_face in similar_faces:
                target_vision_frame = enhance_face(similar_face, target_vision_frame)
    return target_vision_frame

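# Batch entry point used by the frame-processor core: each queued frame is
# read, enhanced in place and reported to the progress callback. The source
# paths are unused because enhancement needs no source face.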
def process_frames(source_paths : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None:
    reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None

    for queue_payload in process_manager.manage(queue_payloads):
        target_vision_path = queue_payload['frame_path']
        target_vision_frame = read_image(target_vision_path)
        output_vision_frame = process_frame(
        {
            'reference_faces': reference_faces,
            'target_vision_frame': target_vision_frame
        })
        write_image(target_vision_path, output_vision_frame)
        update_progress(1)


def process_image(source_path : str, target_path : str, output_path : str) -> None:
    reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
    target_vision_frame = read_static_image(target_path)
    output_vision_frame = process_frame(
    {
        'reference_faces': reference_faces,
        'target_vision_frame': target_vision_frame
    })
    write_image(output_path, output_vision_frame)


def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
    frame_processors.multi_process_frames(None, temp_frame_paths, process_frames)
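
# Minimal usage sketch for a single image (a sketch only: it assumes the
# upstream facefusion package layout, and the paths and settings below are
# illustrative; in practice the CLI configures these globals before the
# processor runs):
#
#     import facefusion.globals
#     from facefusion.processors.frame import globals as frame_processors_globals
#     from facefusion.processors.frame.modules import face_enhancer
#
#     frame_processors_globals.face_enhancer_model = 'gfpgan_1.4'
#     frame_processors_globals.face_enhancer_blend = 80
#     facefusion.globals.face_selector_mode = 'many'
#     facefusion.globals.face_mask_types = [ 'box' ]
#     facefusion.globals.face_mask_blur = 0.3
#     facefusion.globals.execution_device_id = '0'
#     facefusion.globals.execution_providers = [ 'CPUExecutionProvider' ]
#
#     if face_enhancer.pre_check():  # downloads the model when permitted
#         face_enhancer.process_image(None, 'target.jpg', 'output.jpg')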