G-Rost committed
Commit 54deee2
1 Parent(s): e879118

Upload 3 files

roop/processors/roop_processors_frame_core.py ADDED
@@ -0,0 +1,91 @@
+ import os
+ import sys
+ import importlib
+ import psutil
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+ from queue import Queue
+ from types import ModuleType
+ from typing import Any, List, Callable
+ from tqdm import tqdm
+
+ import roop
+
+ FRAME_PROCESSORS_MODULES: List[ModuleType] = []
+ FRAME_PROCESSORS_INTERFACE = [
+     'pre_check',
+     'pre_start',
+     'process_frame',
+     'process_frames',
+     'process_image',
+     'process_video',
+     'post_process'
+ ]
+
+
+ def load_frame_processor_module(frame_processor: str) -> Any:
+     try:
+         frame_processor_module = importlib.import_module(f'roop.processors.frame.{frame_processor}')
+         for method_name in FRAME_PROCESSORS_INTERFACE:
+             if not hasattr(frame_processor_module, method_name):
+                 raise NotImplementedError
+     except ModuleNotFoundError:
+         sys.exit(f'Frame processor {frame_processor} not found.')
+     except NotImplementedError:
+         sys.exit(f'Frame processor {frame_processor} not implemented correctly.')
+     return frame_processor_module
+
+
+ def get_frame_processors_modules(frame_processors: List[str]) -> List[ModuleType]:
+     global FRAME_PROCESSORS_MODULES
+
+     if not FRAME_PROCESSORS_MODULES:
+         for frame_processor in frame_processors:
+             frame_processor_module = load_frame_processor_module(frame_processor)
+             FRAME_PROCESSORS_MODULES.append(frame_processor_module)
+     return FRAME_PROCESSORS_MODULES
+
+
+ def multi_process_frame(source_path: str, temp_frame_paths: List[str], process_frames: Callable[[str, List[str], Any], None], update: Callable[[], None]) -> None:
+     with ThreadPoolExecutor(max_workers=roop.globals.execution_threads) as executor:
+         futures = []
+         queue = create_queue(temp_frame_paths)
+         queue_per_future = max(len(temp_frame_paths) // roop.globals.execution_threads, 1)
+         while not queue.empty():
+             future = executor.submit(process_frames, source_path, pick_queue(queue, queue_per_future), update)
+             futures.append(future)
+         for future in as_completed(futures):
+             future.result()
+
+
+ def create_queue(temp_frame_paths: List[str]) -> Queue[str]:
+     queue: Queue[str] = Queue()
+     for frame_path in temp_frame_paths:
+         queue.put(frame_path)
+     return queue
+
+
+ def pick_queue(queue: Queue[str], queue_per_future: int) -> List[str]:
+     queues = []
+     for _ in range(queue_per_future):
+         if not queue.empty():
+             queues.append(queue.get())
+     return queues
+
+
+ def process_video(source_path: str, frame_paths: list[str], process_frames: Callable[[str, List[str], Any], None]) -> None:
+     progress_bar_format = '{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'
+     total = len(frame_paths)
+     with tqdm(total=total, desc='Processing', unit='frame', dynamic_ncols=True, bar_format=progress_bar_format) as progress:
+         multi_process_frame(source_path, frame_paths, process_frames, lambda: update_progress(progress))
+
+
+ def update_progress(progress: Any = None) -> None:
+     process = psutil.Process(os.getpid())
+     memory_usage = process.memory_info().rss / 1024 / 1024 / 1024
+     progress.set_postfix({
+         'memory_usage': '{:.2f}'.format(memory_usage).zfill(5) + 'GB',
+         'execution_providers': roop.globals.execution_providers,
+         'execution_threads': roop.globals.execution_threads
+     })
+     progress.refresh()
+     progress.update(1)
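
For context, a minimal usage sketch of how a caller might drive this core module. It is not part of the committed files; the run_processors wrapper and the processor name list are illustrative assumptions, while get_frame_processors_modules and the interface methods come from the file above.

# Illustrative sketch only, not part of this commit.
from roop.processors.frame.core import get_frame_processors_modules

def run_processors(source_path: str, temp_frame_paths: list) -> None:
    # Each returned module has passed the FRAME_PROCESSORS_INTERFACE check in
    # load_frame_processor_module, so the calls below are guaranteed to exist.
    for frame_processor in get_frame_processors_modules(['face_swapper', 'face_enhancer']):
        if frame_processor.pre_check() and frame_processor.pre_start():
            frame_processor.process_video(source_path, temp_frame_paths)
            frame_processor.post_process()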
roop/processors/roop_processors_frame_face_enhancer.py ADDED
@@ -0,0 +1,104 @@
+ from typing import Any, List, Callable
+ import cv2
+ import threading
+ from gfpgan.utils import GFPGANer
+
+ import roop.globals
+ import roop.processors.frame.core
+ from roop.core import update_status
+ from roop.face_analyser import get_many_faces
+ from roop.typing import Frame, Face
+ from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video
+
+ FACE_ENHANCER = None
+ THREAD_SEMAPHORE = threading.Semaphore()
+ THREAD_LOCK = threading.Lock()
+ NAME = 'ROOP.FACE-ENHANCER'
+
+
+ def get_face_enhancer() -> Any:
+     global FACE_ENHANCER
+
+     with THREAD_LOCK:
+         if FACE_ENHANCER is None:
+             model_path = resolve_relative_path('../models/GFPGANv1.4.pth')
+             # todo: set models path -> https://github.com/TencentARC/GFPGAN/issues/399
+             FACE_ENHANCER = GFPGANer(model_path=model_path, upscale=1, device=get_device())
+     return FACE_ENHANCER
+
+
+ def get_device() -> str:
+     if 'CUDAExecutionProvider' in roop.globals.execution_providers:
+         return 'cuda'
+     if 'CoreMLExecutionProvider' in roop.globals.execution_providers:
+         return 'mps'
+     return 'cpu'
+
+
+ def clear_face_enhancer() -> None:
+     global FACE_ENHANCER
+
+     FACE_ENHANCER = None
+
+
+ def pre_check() -> bool:
+     download_directory_path = resolve_relative_path('../models')
+     conditional_download(download_directory_path, ['https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/GFPGANv1.4.pth'])
+     return True
+
+
+ def pre_start() -> bool:
+     if not is_image(roop.globals.target_path) and not is_video(roop.globals.target_path):
+         update_status('Select an image or video for target path.', NAME)
+         return False
+     return True
+
+
+ def post_process() -> None:
+     clear_face_enhancer()
+
+
+ def enhance_face(target_face: Face, temp_frame: Frame) -> Frame:
+     start_x, start_y, end_x, end_y = map(int, target_face['bbox'])
+     padding_x = int((end_x - start_x) * 0.5)
+     padding_y = int((end_y - start_y) * 0.5)
+     start_x = max(0, start_x - padding_x)
+     start_y = max(0, start_y - padding_y)
+     end_x = max(0, end_x + padding_x)
+     end_y = max(0, end_y + padding_y)
+     temp_face = temp_frame[start_y:end_y, start_x:end_x]
+     if temp_face.size:
+         with THREAD_SEMAPHORE:
+             _, _, temp_face = get_face_enhancer().enhance(
+                 temp_face,
+                 paste_back=True
+             )
+         temp_frame[start_y:end_y, start_x:end_x] = temp_face
+     return temp_frame
+
+
+ def process_frame(source_face: Face, reference_face: Face, temp_frame: Frame) -> Frame:
+     many_faces = get_many_faces(temp_frame)
+     if many_faces:
+         for target_face in many_faces:
+             temp_frame = enhance_face(target_face, temp_frame)
+     return temp_frame
+
+
+ def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None:
+     for temp_frame_path in temp_frame_paths:
+         temp_frame = cv2.imread(temp_frame_path)
+         result = process_frame(None, None, temp_frame)
+         cv2.imwrite(temp_frame_path, result)
+         if update:
+             update()
+
+
+ def process_image(source_path: str, target_path: str, output_path: str) -> None:
+     target_frame = cv2.imread(target_path)
+     result = process_frame(None, None, target_frame)
+     cv2.imwrite(output_path, result)
+
+
+ def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
+     roop.processors.frame.core.process_video(None, temp_frame_paths, process_frames)
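
A minimal sketch of running this enhancer on a single image, again not part of the commit. It assumes the file is importable as roop.processors.frame.face_enhancer (as its own imports suggest), that roop.globals carries the settings roop's CLI would normally set, and that the file paths are placeholders.

# Illustrative sketch only, not part of this commit.
import roop.globals
import roop.processors.frame.face_enhancer as face_enhancer

roop.globals.execution_providers = ['CPUExecutionProvider']  # assumed CPU-only run
roop.globals.target_path = 'target.jpg'                      # placeholder path

if face_enhancer.pre_check() and face_enhancer.pre_start():
    # The enhancer ignores the source image, so source_path can be None here.
    face_enhancer.process_image(None, roop.globals.target_path, 'output.jpg')
    face_enhancer.post_process()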
roop/processors/roop_processors_frame_face_swapper.py ADDED
@@ -0,0 +1,100 @@
+ from typing import Any, List, Callable
+ import cv2
+ import insightface
+ import threading
+
+ import roop.globals
+ import roop.processors.frame.core
+ from roop.core import update_status
+ from roop.face_analyser import get_one_face, get_many_faces, find_similar_face
+ from roop.face_reference import get_face_reference, set_face_reference, clear_face_reference
+ from roop.typing import Face, Frame
+ from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video
+
+ FACE_SWAPPER = None
+ THREAD_LOCK = threading.Lock()
+ NAME = 'ROOP.FACE-SWAPPER'
+
+
+ def get_face_swapper() -> Any:
+     global FACE_SWAPPER
+
+     with THREAD_LOCK:
+         if FACE_SWAPPER is None:
+             model_path = resolve_relative_path('../models/inswapper_128.onnx')
+             FACE_SWAPPER = insightface.model_zoo.get_model(model_path, providers=roop.globals.execution_providers)
+     return FACE_SWAPPER
+
+
+ def clear_face_swapper() -> None:
+     global FACE_SWAPPER
+
+     FACE_SWAPPER = None
+
+
+ def pre_check() -> bool:
+     download_directory_path = resolve_relative_path('../models')
+     conditional_download(download_directory_path, ['https://huggingface.co/CountFloyd/deepfake/resolve/main/inswapper_128.onnx'])
+     return True
+
+
+ def pre_start() -> bool:
+     if not is_image(roop.globals.source_path):
+         update_status('Select an image for source path.', NAME)
+         return False
+     elif not get_one_face(cv2.imread(roop.globals.source_path)):
+         update_status('No face in source path detected.', NAME)
+         return False
+     if not is_image(roop.globals.target_path) and not is_video(roop.globals.target_path):
+         update_status('Select an image or video for target path.', NAME)
+         return False
+     return True
+
+
+ def post_process() -> None:
+     clear_face_swapper()
+     clear_face_reference()
+
+
+ def swap_face(source_face: Face, target_face: Face, temp_frame: Frame) -> Frame:
+     return get_face_swapper().get(temp_frame, target_face, source_face, paste_back=True)
+
+
+ def process_frame(source_face: Face, reference_face: Face, temp_frame: Frame) -> Frame:
+     if roop.globals.many_faces:
+         many_faces = get_many_faces(temp_frame)
+         if many_faces:
+             for target_face in many_faces:
+                 temp_frame = swap_face(source_face, target_face, temp_frame)
+     else:
+         target_face = find_similar_face(temp_frame, reference_face)
+         if target_face:
+             temp_frame = swap_face(source_face, target_face, temp_frame)
+     return temp_frame
+
+
+ def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None:
+     source_face = get_one_face(cv2.imread(source_path))
+     reference_face = None if roop.globals.many_faces else get_face_reference()
+     for temp_frame_path in temp_frame_paths:
+         temp_frame = cv2.imread(temp_frame_path)
+         result = process_frame(source_face, reference_face, temp_frame)
+         cv2.imwrite(temp_frame_path, result)
+         if update:
+             update()
+
+
+ def process_image(source_path: str, target_path: str, output_path: str) -> None:
+     source_face = get_one_face(cv2.imread(source_path))
+     target_frame = cv2.imread(target_path)
+     reference_face = None if roop.globals.many_faces else get_one_face(target_frame, roop.globals.reference_face_position)
+     result = process_frame(source_face, reference_face, target_frame)
+     cv2.imwrite(output_path, result)
+
+
+ def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
+     if not roop.globals.many_faces and not get_face_reference():
+         reference_frame = cv2.imread(temp_frame_paths[roop.globals.reference_frame_number])
+         reference_face = get_one_face(reference_frame, roop.globals.reference_face_position)
+         set_face_reference(reference_face)
+     roop.processors.frame.core.process_video(source_path, temp_frame_paths, process_frames)
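
And a comparable sketch for a single-image swap, also not part of the commit. It assumes the file is importable as roop.processors.frame.face_swapper and that roop.globals holds the fields roop's CLI would normally populate; all paths are placeholders.

# Illustrative sketch only, not part of this commit.
import roop.globals
import roop.processors.frame.face_swapper as face_swapper

roop.globals.execution_providers = ['CPUExecutionProvider']  # assumed CPU-only run
roop.globals.source_path = 'source.jpg'                      # placeholder paths
roop.globals.target_path = 'target.jpg'
roop.globals.many_faces = False
roop.globals.reference_face_position = 0

if face_swapper.pre_check() and face_swapper.pre_start():
    face_swapper.process_image(roop.globals.source_path, roop.globals.target_path, 'output.jpg')
    face_swapper.post_process()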