import cv2
import gradio as gr
from z_app_factory import get_app


def inference(image):
    # Gradio passes the image in as an RGB array; swap the channels so the
    # detector and the OpenCV drawing calls below work in BGR order
    # (COLOR_BGR2RGB and COLOR_RGB2BGR perform the same channel swap).
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    annotated_image = image.copy()

    # Run face detection; each result carries a bounding box, a confidence
    # score and facial keypoints.
    lst2d_res = get_app(image)

    thickness = 3
    lineType = 8
    font = cv2.FONT_HERSHEY_SIMPLEX

    for face in lst2d_res:
        bbox = [int(i) for i in face["bbox"]]
        score = face["score"]
        point_color = (0, int(255 * score), 0)  # BGR: greener means higher confidence
        x1, y1 = bbox[:2]
        x2, y2 = bbox[2:]

        # Write the confidence score (truncated, e.g. "0.98") above the box.
        cv2.putText(annotated_image, str(score)[:4], (x1, y1 - 10), font, 0.8, (0, 255, 0), 2)

        # Draw the four edges of the bounding box.
        cv2.line(annotated_image, (x1, y1), (x2, y1), point_color, thickness, lineType)
        cv2.line(annotated_image, (x2, y1), (x2, y2), point_color, thickness, lineType)
        cv2.line(annotated_image, (x1, y1), (x1, y2), point_color, thickness, lineType)
        cv2.line(annotated_image, (x1, y2), (x2, y2), point_color, thickness, lineType)

        # Mark each facial keypoint with a small dot.
        for kp in face["kps"]:
            x, y = [int(i) for i in kp]
            cv2.circle(annotated_image, (x, y), 2, (2, 30, 200), 2)

    # Convert back to RGB for display in the Gradio output component.
    annotated_image = cv2.cvtColor(annotated_image, cv2.COLOR_RGB2BGR)
    return annotated_image
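

# Hypothetical local smoke test (not part of the original Space): a minimal
# sketch of how inference() can be exercised outside Gradio. It assumes an
# example image exists at "imgs/face1.jpg", mimics Gradio by feeding an RGB
# array in, and writes the annotated RGB result back to disk via OpenCV.
def _local_smoke_test(path="imgs/face1.jpg", out_path="annotated.jpg"):
    bgr = cv2.imread(path)
    if bgr is None:
        raise FileNotFoundError(path)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)  # inference() expects RGB input
    annotated_rgb = inference(rgb)
    cv2.imwrite(out_path, cv2.cvtColor(annotated_rgb, cv2.COLOR_RGB2BGR))
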
title = "Detect Face"
description = "Demo for Detect Face. To use it, simply upload an image or click one of the examples to load it. Read more at the links below."
article = "<p style='text-align: center'><a href='https://www.yuque.com/itmorn/ability/detect_face' target='_blank'>Project Documents</a> | <a href='https://www.bilibili.com/video/BV1cg411v7CR' target='_blank'>Video Demo</a></p>"

# Build and launch the Gradio demo (uses the legacy gr.inputs / gr.outputs
# component API).
gr.Interface(
    inference,
    [gr.inputs.Image(label="Input")],
    gr.outputs.Image(type="pil", label="Output"),
    title=title,
    description=description,
    article=article,
    examples=[
        ["imgs/face1.jpg"],
        ["imgs/face2.jpg"],
        ["imgs/11.jpg"]
    ]).launch(debug=True)