yasserrmd committed on
Commit
6fb0ffb
1 Parent(s): 84def21

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -23
app.py CHANGED
@@ -63,26 +63,29 @@ def generate_journal_with_images(video_path, frame_interval=30):
63
  # Make predictions using YOLOv10 on the current frame
64
  results = model.predict(source=frame_rgb, device=device)
65
 
66
- # Plot bounding boxes and labels on the image
67
- annotated_frame = results[0].plot() # Plot detection results on the frame
68
 
69
- # Save the annotated image
70
- frame_filename = os.path.join(output_folder, f"frame_{frame_count}.jpg")
71
- cv2.imwrite(frame_filename, annotated_frame[:, :, ::-1]) # Convert back to BGR for saving
72
- image_paths.append(frame_filename)
73
-
74
- # Extract labels (class indices) and map them to class names
75
- detected_objects = [model.names[int(box.cls)] for box in results[0].boxes] # Access the first result
76
-
77
- # Get current timestamp in the video
78
- timestamp = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000 # Convert ms to seconds
79
-
80
- # Categorize the detected objects into activities
81
- activity_summary = categorize_activity(detected_objects)
82
-
83
- # Store the activities with their timestamp
84
- for activity, objects in activity_summary.items():
85
- journal_entries.append(f"At {timestamp:.2f} seconds: {', '.join(objects[0])}")
 
 
 
86
 
87
  last_processed_frame = frame # Update the last processed frame
88
 
@@ -90,10 +93,6 @@ def generate_journal_with_images(video_path, frame_interval=30):
90
 
91
  cap.release()
92
 
93
- # Debug print to verify the return values
94
- print(f"journal_entries: {journal_entries}")
95
- print(f"image_paths: {image_paths}")
96
-
97
  return journal_entries, image_paths
98
 
99
 
 
63
  # Make predictions using YOLOv10 on the current frame
64
  results = model.predict(source=frame_rgb, device=device)
65
 
66
+ # Extract detected objects
67
+ detected_objects = [model.names[int(box.cls)] for box in results[0].boxes]
68
 
69
+ # Only process frames where objects are detected
70
+ if detected_objects: # If there are detected objects in the frame
71
+
72
+ # Plot bounding boxes and labels on the image
73
+ annotated_frame = results[0].plot() # Plot detection results on the frame
74
+
75
+ # Save the annotated image
76
+ frame_filename = os.path.join(output_folder, f"frame_{frame_count}.jpg")
77
+ cv2.imwrite(frame_filename, annotated_frame[:, :, ::-1]) # Convert back to BGR for saving
78
+ image_paths.append(frame_filename)
79
+
80
+ # Get current timestamp in the video
81
+ timestamp = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000 # Convert ms to seconds
82
+
83
+ # Categorize the detected objects into activities
84
+ activity_summary = categorize_activity(detected_objects)
85
+
86
+ # Store the activities with their timestamp
87
+ for activity, objects in activity_summary.items():
88
+ journal_entries.append(f"At {timestamp:.2f} seconds: {', '.join(objects[0])}")
89
 
90
  last_processed_frame = frame # Update the last processed frame
91
 
 
93
 
94
  cap.release()
95
 
 
 
 
 
96
  return journal_entries, image_paths
97
 
98