yasserrmd committed on
Commit
fd362dd
1 Parent(s): c3d6a85

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -27
app.py CHANGED
@@ -37,6 +37,7 @@ def generate_journal_with_images(video_path):
37
  journal_entries = {}
38
  saved_images = []
39
  frame_count = 0
 
40
  output_folder = "detected_frames"
41
  os.makedirs(output_folder, exist_ok=True) # Create folder to store images
42
 
@@ -45,33 +46,37 @@ def generate_journal_with_images(video_path):
45
  if not ret:
46
  break
47
 
48
- frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
49
-
50
- # Make predictions using YOLOv10 on the current frame
51
- results = model.predict(source=frame_rgb, device=device)
52
-
53
- # Plot bounding boxes and labels on the image
54
- annotated_frame = results[0].plot() # Plot detection results on the frame
55
-
56
- # Save the annotated image
57
- frame_filename = os.path.join(output_folder, f"frame_{frame_count}.jpg")
58
- cv2.imwrite(frame_filename, annotated_frame[:, :, ::-1]) # Convert back to BGR for saving
59
- saved_images.append(frame_filename)
60
-
61
- # Extract labels (class indices) and map them to class names
62
- detected_objects = [model.names[int(box.cls)] for box in results[0].boxes] # Access the first result
63
-
64
- # Get current timestamp in the video
65
- timestamp = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000 # Convert ms to seconds
66
-
67
- # Categorize the detected objects into activities
68
- activity_summary = categorize_activity(detected_objects)
69
-
70
- # Store the activities with their timestamp
71
- for activity, objects in activity_summary.items():
72
- if activity not in journal_entries:
73
- journal_entries[activity] = []
74
- journal_entries[activity].append((f"At {timestamp:.2f} seconds: {', '.join(objects[0])}", frame_filename))
 
 
 
 
75
 
76
  frame_count += 1
77
 
 
37
  journal_entries = {}
38
  saved_images = []
39
  frame_count = 0
40
+ last_processed_frame = None
41
  output_folder = "detected_frames"
42
  os.makedirs(output_folder, exist_ok=True) # Create folder to store images
43
 
 
46
  if not ret:
47
  break
48
 
49
+ # Process every Nth frame or if the current frame is different from the last processed frame
50
+ if frame_count % frame_interval == 0 or (last_processed_frame is not None and is_frame_different(last_processed_frame, frame)):
51
+ frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
52
+
53
+ # Make predictions using YOLOv10 on the current frame
54
+ results = model.predict(source=frame_rgb, device=device)
55
+
56
+ # Plot bounding boxes and labels on the image
57
+ annotated_frame = results[0].plot() # Plot detection results on the frame
58
+
59
+ # Save the annotated image
60
+ frame_filename = os.path.join(output_folder, f"frame_{frame_count}.jpg")
61
+ cv2.imwrite(frame_filename, annotated_frame[:, :, ::-1]) # Convert back to BGR for saving
62
+ saved_images.append(frame_filename)
63
+
64
+ # Extract labels (class indices) and map them to class names
65
+ detected_objects = [model.names[int(box.cls)] for box in results[0].boxes] # Access the first result
66
+
67
+ # Get current timestamp in the video
68
+ timestamp = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000 # Convert ms to seconds
69
+
70
+ # Categorize the detected objects into activities
71
+ activity_summary = categorize_activity(detected_objects)
72
+
73
+ # Store the activities with their timestamp
74
+ for activity, objects in activity_summary.items():
75
+ if activity not in journal_entries:
76
+ journal_entries[activity] = []
77
+ journal_entries[activity].append((f"At {timestamp:.2f} seconds: {', '.join(objects[0])}", frame_filename))
78
+
79
+ last_processed_frame = frame # Update the last processed frame
80
 
81
  frame_count += 1
82