matthewfarant committed
Commit 87f88b5 · 1 Parent(s): 8fa6720

Initial commit

Files changed (2)
  1. app.py +171 -0
  2. requirements.txt +134 -0
app.py ADDED
@@ -0,0 +1,171 @@
+ import gradio as gr
+ import os
+ import cv2
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import matplotlib.patches as patches
+ from sklearn.metrics.pairwise import cosine_similarity
+ from facenet_pytorch import InceptionResnetV1
+ import mtcnn
+ import torch
+ import shutil
+ from PIL import Image
+ import ssl
+ ssl._create_default_https_context = ssl._create_unverified_context
+
+ # Work from the script's own directory so relative paths resolve
+ abspath = os.path.abspath(__file__)
+ dname = os.path.dirname(abspath)
+ os.chdir(dname)
+
+ def save_uploaded_files(uploaded_file_paths, folder):
+     # Recreate the folder so stale files from a previous run are discarded
+     shutil.rmtree(folder, ignore_errors=True)
+     os.makedirs(folder)
+
+     for uploaded_file_path in uploaded_file_paths:
+         shutil.move(uploaded_file_path, os.path.join(folder, os.path.basename(uploaded_file_path)))
+
+ def face_detection(img, threshold=0.9, return_coords=True):
+     # Detect faces with MTCNN
+     detector = mtcnn.MTCNN()
+     faces = detector.detect_faces(img)
+
+     # Collect cropped faces and their bounding-box coordinates
+     face_list = []
+     face_coords = []
+
+     for face in faces:
+         if face['confidence'] >= threshold:
+             x, y, width, height = face['box']
+             face_coords.append((x, y, width, height))
+
+             # Draw a bounding box on the current matplotlib axes
+             # (debugging aid only; the app never renders this figure)
+             rect = patches.Rectangle((x, y), width, height, linewidth=2, edgecolor='orange', facecolor='none')
+             plt.gca().add_patch(rect)
+
+             # Crop the face and append it to the list
+             face_list.append(img[y:y+height, x:x+width])
+
+     if return_coords:
+         return face_list, face_coords
+     return face_list
+
+ def generate_combined_reference():
+     image_paths = [os.path.join("reference", name) for name in os.listdir("reference")]
+
+     # First pass: find the largest width and height across all reference images
+     images = [Image.open(path) for path in image_paths]
+     max_width = max(image.width for image in images)
+     max_height = max(image.height for image in images)
+
+     # Second pass: resize every image to the same (max) dimensions
+     images_resized = [image.resize((max_width, max_height)) for image in images]
+
+     # Create a blank canvas wide enough to hold all images side by side
+     combined_image = Image.new("RGB", (max_width * len(images_resized), max_height))
+
+     # Paste each image into the combined image
+     for i, image in enumerate(images_resized):
+         combined_image.paste(image, (i * max_width, 0))
+
+     # Save the combined image
+     combined_image.save("combined_reference.jpg")
+
+     return "Created combined reference image."
+
+ def img_to_encoding(img):
+     # FaceNet (InceptionResnetV1) produces a 512-dimensional face embedding
+     model = InceptionResnetV1(pretrained='vggface2').eval()
+     img = cv2.resize(img, (160, 160))
+     img = np.expand_dims(img, axis=0)
+     img = img / 255.0  # Normalize pixel values
+     img = torch.tensor(img.transpose(0, 3, 1, 2), dtype=torch.float32)  # NHWC -> NCHW for PyTorch
+     encoding = model(img)
+
+     return encoding.flatten().detach().numpy()
+
+ def process_image():
+     # Load the group photo from the "testing" folder
+     group_photo_path = "testing/"
+     group_photo_files = os.listdir(group_photo_path)
+
+     if not group_photo_files:
+         return "No image found in testing folder."
+     if len(group_photo_files) > 1:
+         return "Can only process one image at a time."
+
+     image = group_photo_files[0]
+     # OpenCV loads BGR; convert to RGB before detection and encoding
+     group_photo = cv2.cvtColor(cv2.imread(group_photo_path + image), cv2.COLOR_BGR2RGB)
+
+     # Face detection on the group photo
+     group_photo_faces, group_photo_face_coords = face_detection(group_photo)
+
+     # Generate the combined reference image and detect the faces in it
+     generate_combined_reference()
+     reference_photo = plt.imread("combined_reference.jpg")
+     reference_faces = face_detection(reference_photo, threshold=0.9, return_coords=False)
+
+     # Convert the reference faces and group-photo faces into 512-dimensional embeddings
+     ref_encodings = [img_to_encoding(face) for face in reference_faces]
+     face_encodings = [img_to_encoding(face) for face in group_photo_faces]
+
+     # Cosine similarity between every reference face and every face in the group photo
+     similarities = cosine_similarity(ref_encodings, face_encodings)
+
+     # Average each group-photo face's similarity across all reference faces
+     average_similarities = np.mean(similarities, axis=0)
+
+     # The face with the highest average similarity is the one to blur
+     max_avg_similarity_idx = np.argmax(average_similarities)
+
+     # Coordinates of the face with the highest average similarity
+     (x, y, w, h) = group_photo_face_coords[max_avg_similarity_idx]
+
+     # Blur the corresponding region in the group photo
+     img_obstruct = cv2.imread(group_photo_path + image)
+     img_obstruct[y:y+h, x:x+w] = cv2.blur(img_obstruct[y:y+h, x:x+w], (50, 50))
+     img_obstruct = cv2.cvtColor(img_obstruct, cv2.COLOR_BGR2RGB)
+
+     # Delete all photos in the reference and testing folders after processing
+     shutil.rmtree('reference', ignore_errors=True)
+     shutil.rmtree('testing', ignore_errors=True)
+
+     return img_obstruct
+
+ # Setting up the Gradio interface
+ def interface_fn(reference_images, group_photo):
+     save_uploaded_files(reference_images, 'reference')
+     save_uploaded_files(group_photo, 'testing')
+     return process_image()
+
+ iface = gr.Interface(
+     fn=interface_fn,
+     inputs=[
+         gr.File(file_types=["image"], file_count="multiple"),
+         gr.File(file_types=["image"], file_count="multiple")
+     ],
+     outputs=gr.Image(),
+     title="Face Detection and Blurring",
+     description="Upload multiple reference images of one person and a single group photo. The app will find the best match to the reference person in the group photo and return it with that face blurred."
+ )
+
+ if __name__ == "__main__":
+     iface.launch()
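
For reference, the identification step in `process_image` boils down to a cosine-similarity lookup. Below is a minimal, self-contained sketch of that step; it substitutes random vectors for the real FaceNet embeddings (InceptionResnetV1 emits 512-dimensional vectors), so the numbers are meaningless, but the shapes and the argmax-over-averages logic mirror the code above.

import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

rng = np.random.default_rng(0)
ref_encodings = rng.normal(size=(3, 512))   # stand-ins for 3 reference-face embeddings
face_encodings = rng.normal(size=(5, 512))  # stand-ins for 5 faces found in the group photo

# similarities[i, j] = cosine similarity between reference face i and group face j
similarities = cosine_similarity(ref_encodings, face_encodings)  # shape (3, 5)

# Average each group face's similarity across all references, then pick the best match
average_similarities = similarities.mean(axis=0)
print("Index of the face to blur:", int(np.argmax(average_similarities)))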
requirements.txt ADDED
@@ -0,0 +1,134 @@
+ absl-py==2.0.0
+ aiofiles==23.2.1
+ altair==5.1.2
+ annotated-types==0.6.0
+ anyio==3.7.1
+ appnope==0.1.3
+ asttokens==2.4.1
+ astunparse==1.6.3
+ attrs==23.1.0
+ cachetools==5.3.2
+ certifi==2023.7.22
+ charset-normalizer==3.3.2
+ click==8.1.7
+ colorama==0.4.6
+ comm==0.1.4
+ contourpy==1.2.0
+ cycler==0.12.1
+ debugpy==1.8.0
+ decorator==5.1.1
+ dlib==19.24.2
+ exceptiongroup==1.1.3
+ executing==2.0.1
+ face-recognition-models==0.3.0
+ facenet-pytorch==2.5.3
+ fastapi==0.104.1
+ ffmpy==0.3.1
+ filelock==3.13.1
+ flatbuffers==23.5.26
+ fonttools==4.44.0
+ fsspec==2023.10.0
+ gast==0.5.4
+ google-auth==2.23.4
+ google-auth-oauthlib==1.0.0
+ google-pasta==0.2.0
+ gradio==4.2.0
+ gradio_client==0.7.0
+ grpcio==1.59.2
+ h11==0.14.0
+ h5py==3.10.0
+ httpcore==1.0.2
+ httpx==0.25.1
+ huggingface-hub==0.19.0
+ idna==3.4
+ importlib-resources==6.1.1
+ ipykernel==6.26.0
+ ipython==8.17.2
+ jedi==0.19.1
+ Jinja2==3.1.2
+ joblib==1.3.2
+ jsonschema==4.19.2
+ jsonschema-specifications==2023.7.1
+ jupyter_client==8.5.0
+ jupyter_core==5.5.0
+ keras==2.14.0
+ kiwisolver==1.4.5
+ libclang==16.0.6
+ Markdown==3.5.1
+ markdown-it-py==3.0.0
+ MarkupSafe==2.1.3
+ matplotlib==3.8.1
+ matplotlib-inline==0.1.6
+ mdurl==0.1.2
+ ml-dtypes==0.2.0
+ mpmath==1.3.0
+ mtcnn==0.1.1
+ nest-asyncio==1.5.8
+ networkx==3.2.1
+ numpy==1.26.1
+ oauthlib==3.2.2
+ opencv-python==4.8.1.78
+ opt-einsum==3.3.0
+ orjson==3.9.10
+ packaging==23.2
+ pandas==2.1.3
+ parso==0.8.3
+ pexpect==4.8.0
+ Pillow==10.1.0
+ platformdirs==3.11.0
+ prompt-toolkit==3.0.39
+ protobuf==4.25.0
+ psutil==5.9.6
+ ptyprocess==0.7.0
+ pure-eval==0.2.2
+ pyasn1==0.5.0
+ pyasn1-modules==0.3.0
+ pydantic==2.4.2
+ pydantic_core==2.10.1
+ pydub==0.25.1
+ Pygments==2.16.1
+ pyparsing==3.1.1
+ python-dateutil==2.8.2
+ python-multipart==0.0.6
+ pytz==2023.3.post1
+ PyYAML==6.0.1
+ pyzmq==25.1.1
+ referencing==0.30.2
+ requests==2.31.0
+ requests-oauthlib==1.3.1
+ rich==13.6.0
+ rpds-py==0.12.0
+ rsa==4.9
+ scikit-learn==1.3.2
+ scipy==1.11.3
+ semantic-version==2.10.0
+ shellingham==1.5.4
+ six==1.16.0
+ sniffio==1.3.0
+ stack-data==0.6.3
+ starlette==0.27.0
+ sympy==1.12
+ tensorboard==2.14.1
+ tensorboard-data-server==0.7.2
+ tensorflow==2.14.0
+ tensorflow-estimator==2.14.0
+ tensorflow-io-gcs-filesystem==0.34.0
+ tensorflow-macos==2.14.0
+ termcolor==2.3.0
+ threadpoolctl==3.2.0
+ tomlkit==0.12.0
+ toolz==0.12.0
+ torch==2.1.0
+ torchvision==0.16.0
+ tornado==6.3.3
+ tqdm==4.66.1
+ traitlets==5.13.0
+ typer==0.9.0
+ typing_extensions==4.8.0
+ tzdata==2023.3
+ urllib3==2.0.7
+ uvicorn==0.24.0.post1
+ wcwidth==0.2.9
+ websockets==11.0.3
+ Werkzeug==3.0.1
+ wrapt==1.14.1
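
These pins appear to be a full environment export rather than a minimal dependency list: the app itself only needs gradio, opencv-python, numpy, matplotlib, scikit-learn, facenet-pytorch (which pulls in torch), mtcnn (which pulls in tensorflow), and Pillow. Installing the file as-is with `pip install -r requirements.txt` and then running `python app.py` should reproduce the Space locally, though the `tensorflow-macos` pin suggests the export came from a macOS machine and may need adjusting on other platforms.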