import cv2
import numpy as np
from tensorflow.lite.python.interpreter import Interpreter


def tflite_detect_webcam(modelpath, lblpath, min_conf=0.5, txt_only=False, frame_skip=5):
    # Load the label map into memory
    with open(lblpath, 'r') as f:
        labels = [line.strip() for line in f.readlines()]

    # Load the TensorFlow Lite model into memory
    interpreter = Interpreter(model_path=modelpath)
    interpreter.allocate_tensors()

    # Get model details
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    input_height, input_width, _ = input_details[0]['shape'][1:]

    float_input = (input_details[0]['dtype'] == np.float32)
    input_mean = 127.5
    input_std = 127.5

    # Open the webcam
    # camera_url = "rtsp://admin:Lubanzi@@192.168.0.200:554/Streaming/Channels/101"
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
    frame_rate = 20
    cap.set(cv2.CAP_PROP_FPS, frame_rate)

    frame_counter = 0
    while True:
        # Read a frame from the webcam
        ret, frame = cap.read()

        # Check if the frame is empty
        if not ret:
            print("Error reading frame from the webcam")
            break

        frame_counter += 1

        # Skip frames if needed
        if frame_counter % frame_skip != 0:
            continue

        # Resize the frame to the expected shape [1xHxWx3]
        input_data = cv2.resize(frame, (input_width, input_height))

        # Check if the resized frame is empty
        if input_data.size == 0:
            print("Error resizing the frame")
            break

        input_data = np.expand_dims(input_data, axis=0)

        # Normalize pixel values if using a floating model (i.e., if the model is non-quantized)
        if float_input:
            input_data = (np.float32(input_data) - input_mean) / input_std

        # Perform the actual detection by running the model with the image as input
        interpreter.set_tensor(input_details[0]['index'], input_data)
        interpreter.invoke()

        # Retrieve detection results
        boxes = interpreter.get_tensor(output_details[1]['index'])[0]    # Bounding box coordinates of detected objects
        classes = interpreter.get_tensor(output_details[3]['index'])[0]  # Class index of detected objects
        scores = interpreter.get_tensor(output_details[0]['index'])[0]   # Confidence of detected objects

        # Loop over all detections and draw markers if confidence is above the minimum threshold
        for i in range(len(scores)):
            if (scores[i] > min_conf) and (scores[i] <= 1.0):
                # Scale normalized box coordinates to pixel coordinates
                ymin, xmin, ymax, xmax = boxes[i]
                ymin = int(ymin * frame.shape[0])
                xmin = int(xmin * frame.shape[1])
                ymax = int(ymax * frame.shape[0])
                xmax = int(xmax * frame.shape[1])

                # Draw label
                object_name = labels[int(classes[i])]
                label = f'{object_name}: {int(scores[i] * 100)}%'
                label_ymin = max(ymin + 26, 10)
                cv2.putText(frame, label, (xmin, label_ymin), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (255, 255, 255), 2)

                # Draw corner markers (instead of a full bounding box) with different colors
                corner_thickness = 6   # Thickness of the lines at corners
                corner_offset = 10     # Length of each corner line segment
                corner_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0)]  # One color per corner

                # Top-left corner
                cv2.line(frame, (xmin, ymin), (xmin + corner_offset, ymin), corner_colors[0], thickness=corner_thickness)
                cv2.line(frame, (xmin, ymin), (xmin, ymin + corner_offset), corner_colors[0], thickness=corner_thickness)
                # Top-right corner
                cv2.line(frame, (xmax, ymin), (xmax - corner_offset, ymin), corner_colors[1], thickness=corner_thickness)
                cv2.line(frame, (xmax, ymin), (xmax, ymin + corner_offset), corner_colors[1], thickness=corner_thickness)
                # Bottom-left corner
                cv2.line(frame, (xmin, ymax), (xmin + corner_offset, ymax), corner_colors[2], thickness=corner_thickness)
                cv2.line(frame, (xmin, ymax), (xmin, ymax - corner_offset), corner_colors[2], thickness=corner_thickness)
                # Bottom-right corner
                cv2.line(frame, (xmax, ymax), (xmax - corner_offset, ymax), corner_colors[3], thickness=corner_thickness)
                cv2.line(frame, (xmax, ymax), (xmax, ymax - corner_offset), corner_colors[3], thickness=corner_thickness)

        # Display the frame
        cv2.imshow("Object Detection", frame)

        # Break the loop if the 'q' key is pressed
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release the webcam and close the OpenCV window
    cap.release()
    cv2.destroyAllWindows()


# Example usage:
model_path = '/Users/macmini/Desktop/pptflite/detect.tflite'
label_path = '/Users/macmini/Desktop/pptflite/labelmap.txt'
tflite_detect_webcam(model_path, label_path, min_conf=0.5, frame_skip=5)
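

# Optional sanity check (a minimal sketch, not part of the detection loop above): the order
# in which boxes/classes/scores appear in the interpreter's output tensors depends on how the
# SSD model was exported, so if detections look scrambled, print the output tensor metadata
# and adjust the output_details indices used above to match. The helper name inspect_outputs
# is illustrative only and not assumed to exist elsewhere.
def inspect_outputs(modelpath):
    interpreter = Interpreter(model_path=modelpath)
    interpreter.allocate_tensors()
    for i, detail in enumerate(interpreter.get_output_details()):
        print(i, detail['name'], detail['shape'])

# inspect_outputs(model_path)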