from typing import Any, Dict, List, Optional
import cv2
import gradio
import facefusion.globals
from facefusion import wording
from facefusion.core import conditional_append_reference_faces
from facefusion.face_store import clear_static_faces, get_reference_faces, clear_reference_faces
from facefusion.typing import Frame, Face, FaceSet
from facefusion.vision import get_video_frame, count_video_frame_total, normalize_frame_color, resize_frame_dimension, read_static_image, read_static_images
from facefusion.face_analyser import get_average_face, clear_face_analyser
from facefusion.content_analyser import analyse_frame
from facefusion.processors.frame.core import load_frame_processor_module
from facefusion.filesystem import is_image, is_video
from facefusion.uis.typing import ComponentName
from facefusion.uis.core import get_ui_component, register_ui_component

PREVIEW_IMAGE : Optional[gradio.Image] = None
PREVIEW_FRAME_SLIDER : Optional[gradio.Slider] = None

def render() -> None:
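	# build the preview image and frame slider from the current globals and register the slider as a shared UI component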
	global PREVIEW_IMAGE
	global PREVIEW_FRAME_SLIDER

	preview_image_args: Dict[str, Any] =\
	{
		'label': wording.get('preview_image_label'),
		'interactive': False
	}
	preview_frame_slider_args: Dict[str, Any] =\
	{
		'label': wording.get('preview_frame_slider_label'),
		'step': 1,
		'minimum': 0,
		'maximum': 100,
		'visible': False
	}
	conditional_append_reference_faces()
	source_frames = read_static_images(facefusion.globals.source_paths)
	source_face = get_average_face(source_frames)
	reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
	if is_image(facefusion.globals.target_path):
		target_frame = read_static_image(facefusion.globals.target_path)
		preview_frame = process_preview_frame(source_face, reference_faces, target_frame)
		preview_image_args['value'] = normalize_frame_color(preview_frame)
	if is_video(facefusion.globals.target_path):
		temp_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
		preview_frame = process_preview_frame(source_face, reference_faces, temp_frame)
		preview_image_args['value'] = normalize_frame_color(preview_frame)
		preview_image_args['visible'] = True
		preview_frame_slider_args['value'] = facefusion.globals.reference_frame_number
		preview_frame_slider_args['maximum'] = count_video_frame_total(facefusion.globals.target_path)
		preview_frame_slider_args['visible'] = True
	PREVIEW_IMAGE = gradio.Image(**preview_image_args)
	PREVIEW_FRAME_SLIDER = gradio.Slider(**preview_frame_slider_args)
	register_ui_component('preview_frame_slider', PREVIEW_FRAME_SLIDER)

def listen() -> None:
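	# refresh the preview whenever sources, targets, face selector or processor settings change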
	PREVIEW_FRAME_SLIDER.release(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
	multi_one_component_names : List[ComponentName] =\
	[
		'source_image',
		'target_image',
		'target_video'
	]
	for component_name in multi_one_component_names:
		component = get_ui_component(component_name)
		if component:
			for method in [ 'upload', 'change', 'clear' ]:
				getattr(component, method)(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
	multi_two_component_names : List[ComponentName] =\
	[
		'target_image',
		'target_video'
	]
	for component_name in multi_two_component_names:
		component = get_ui_component(component_name)
		if component:
			for method in [ 'upload', 'change', 'clear' ]:
				getattr(component, method)(update_preview_frame_slider, outputs = PREVIEW_FRAME_SLIDER)
	select_component_names : List[ComponentName] =\
	[
		'reference_face_position_gallery',
		'face_analyser_order_dropdown',
		'face_analyser_age_dropdown',
		'face_analyser_gender_dropdown'
	]
	for component_name in select_component_names:
		component = get_ui_component(component_name)
		if component:
			component.select(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
	change_one_component_names : List[ComponentName] =\
	[
		'face_debugger_items_checkbox_group',
		'face_enhancer_model_dropdown',
		'face_enhancer_blend_slider',
		'frame_enhancer_model_dropdown',
		'frame_enhancer_blend_slider',
		'face_selector_mode_dropdown',
		'reference_face_distance_slider',
		'face_mask_types_checkbox_group',
		'face_mask_blur_slider',
		'face_mask_padding_top_slider',
		'face_mask_padding_bottom_slider',
		'face_mask_padding_left_slider',
		'face_mask_padding_right_slider',
		'face_mask_region_checkbox_group'
	]
	for component_name in change_one_component_names:
		component = get_ui_component(component_name)
		if component:
			component.change(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
	change_two_component_names : List[ComponentName] =\
	[
		'frame_processors_checkbox_group',
		'face_swapper_model_dropdown',
		'face_detector_model_dropdown',
		'face_detector_size_dropdown',
		'face_detector_score_slider'
	]
	for component_name in change_two_component_names:
		component = get_ui_component(component_name)
		if component:
			component.change(clear_and_update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)

def clear_and_update_preview_image(frame_number : int = 0) -> gradio.Image:
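	# drop cached analyser, reference and static faces before refreshing the preview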
	clear_face_analyser()
	clear_reference_faces()
	clear_static_faces()
	return update_preview_image(frame_number)

def update_preview_image(frame_number : int = 0) -> gradio.Image:
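	# process the requested frame of the current target and return it as an updated preview image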
	conditional_append_reference_faces()
	source_frames = read_static_images(facefusion.globals.source_paths)
	source_face = get_average_face(source_frames)
	reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
	if is_image(facefusion.globals.target_path):
		target_frame = read_static_image(facefusion.globals.target_path)
		preview_frame = process_preview_frame(source_face, reference_faces, target_frame)
		preview_frame = normalize_frame_color(preview_frame)
		return gradio.Image(value = preview_frame)
	if is_video(facefusion.globals.target_path):
		temp_frame = get_video_frame(facefusion.globals.target_path, frame_number)
		preview_frame = process_preview_frame(source_face, reference_faces, temp_frame)
		preview_frame = normalize_frame_color(preview_frame)
		return gradio.Image(value = preview_frame)
	return gradio.Image(value = None)

def update_preview_frame_slider() -> gradio.Slider:
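	# size the slider to the target video frame count, or hide it when the target is not a video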
	if is_video(facefusion.globals.target_path):
		video_frame_total = count_video_frame_total(facefusion.globals.target_path)
		return gradio.Slider(maximum = video_frame_total, visible = True)
	return gradio.Slider(value = None, maximum = None, visible = False)

def process_preview_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
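	# downscale the frame, blur it when content analysis flags it and run the enabled frame processors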
	temp_frame = resize_frame_dimension(temp_frame, 640, 640)
	if analyse_frame(temp_frame):
		return cv2.GaussianBlur(temp_frame, (99, 99), 0)
	for frame_processor in facefusion.globals.frame_processors:
		frame_processor_module = load_frame_processor_module(frame_processor)
		if frame_processor_module.pre_process('preview'):
			temp_frame = frame_processor_module.process_frame(
				source_face,
				reference_faces,
				temp_frame
			)
	return temp_frame