import cv2
import numpy as np
import gradio as gr

# from models import Yolov4
# model = Yolov4(weight_path="best.pt", class_name_path='coco_classes.txt')

# from ultralytics import YOLO
# model = YOLO("best.pt")                   # load a custom model
# results = model("image.jpg", save=True)   # predict on an image

# Haar cascade for frontal-face detection; the XML file is expected to sit
# next to this script.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')


def detect_faces(frame):
    """Return the number of faces found in a single frame."""
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    print(f"Detected {len(faces)} faces")
    return len(faces)


def detect_faces_in_video(frame):
    """Gradio callback: count faces in the incoming webcam frame and overlay the count."""
    if frame is None:
        return None
    num_faces = detect_faces(frame)
    cv2.putText(frame, f"Faces: {num_faces}", (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
    return frame


"""
def gradio_wrapper(img):
    global face_cascade
    # print(np.shape(img))
    results = model.predict(img)  # predict on an image
    text = ""
    try:
        if max(results[0].boxes.cls) == 0:
            text = "Man"
        if max(results[0].boxes.cls) == 1:
            text = "Woman"
    except Exception:
        pass
    return cv2.putText(img, text, (0, 185), cv2.FONT_HERSHEY_SIMPLEX,
                       1, (0, 0, 255), 2, cv2.LINE_AA, False)
    # return results
"""

demo = gr.Interface(
    detect_faces_in_video,
    # gr.Image(source="webcam", streaming=True, flip=True),
    gr.Image(source="webcam", streaming=True),
    "image",
    live=True,
)

demo.launch()