Prathamesh1420 committed on
Commit
715c15e
·
verified ·
1 Parent(s): c4cefab

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -81
app.py CHANGED
@@ -3,72 +3,33 @@ import streamlit as st
3
  import numpy as np
4
  import tempfile
5
  import os
 
6
  from ultralytics import YOLO
7
- from streamlit_webrtc import (webrtc_streamer, VideoProcessorBase, WebRtcMode, RTCConfiguration)
8
- import av
9
- from turn import get_ice_servers
10
-
11
- model = YOLO('yolov8n.pt')
12
-
13
- # Global variable to store the latest frame with bounding boxes
14
- cached_frame = None
15
- frame_skip = 5 # Process every 5th frame
16
- # # Define a custom video processor class inheriting from VideoProcessorBase
17
- # class VideoProcessor(VideoProcessorBase):
18
- # def __init__(self):
19
- # self.model = model
20
- # self.frame_skip = 10 # Class-level variable for frame skipping
21
- # self.cached_frame = None # Class-level variable for cached frames
22
-
23
- def recv(frame: av.VideoFrame) -> av.VideoFrame:
24
- # Skip frames to reduce processing load
25
- # global frame_skip, cached_frame
26
-
27
- # if frame_skip > 0:
28
- # frame_skip -= 1
29
- # return frame
30
-
31
- # Reset frame skip
32
- # frame_skip = 5
33
-
34
- # Convert frame to OpenCV format (BGR)
35
- frame_bgr = frame.to_ndarray(format="bgr24")
36
-
37
- # Resize frame to reduce processing time
38
- frame_resized = cv2.resize(frame_bgr, (160, 120)) # Instead of 640x480
39
-
40
- # # Detect and track objects using YOLOv8
41
- # results = model.track(frame_resized, persist=True)
42
-
43
- # # Plot results
44
- # frame_annotated = results[0].plot()
45
-
46
- # # Cache the annotated frame
47
- # cached_frame = frame_annotated
48
-
49
-
50
- # Process every nth frame
51
- if frame_skip == 0:
52
- # Reset the frame skip counter
53
- frame_skip = 10
54
-
55
- # Detect and track objects using YOLOv8
56
- results = model.track(frame_resized, persist=True)
57
-
58
- # Plot results
59
- frame_annotated = results[0].plot()
60
-
61
- # Cache the annotated frame
62
- cached_frame = frame_annotated
63
- else:
64
- # Use the cached frame for skipped frames
65
- frame_annotated = cached_frame if cached_frame is not None else frame_resized
66
- frame_skip -= 1
67
-
68
- # Convert frame back to RGB format
69
- frame_rgb = cv2.cvtColor(frame_annotated, cv2.COLOR_BGR2RGB)
70
-
71
- return av.VideoFrame.from_ndarray(frame_rgb, format="rgb24")
72
 
73
  # Streamlit web app
74
  def main():
@@ -83,22 +44,7 @@ def main():
83
 
84
  if option == "Live Stream":
85
  # Start the WebRTC stream with object tracking
86
- # WebRTC streamer configuration
87
- # Define RTC configuration for WebRTC
88
- # RTC_CONFIGURATION = RTCConfiguration({
89
- # "iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]
90
- # })
91
- # Start the WebRTC stream with object tracking
92
- # webrtc_streamer(key="live-stream", video_frame_callback=recv,
93
- # rtc_configuration=rtc_configuration, sendback_audio=False)
94
- webrtc_streamer(key="live-stream",
95
- #mode=WebRtcMode.SENDRECV,
96
- video_frame_callback=recv,
97
- rtc_configuration={"iceServers": get_ice_servers()},
98
- media_stream_constraints={"video": True, "audio": False},
99
- async_processing=True)
100
-
101
-
102
 
103
  elif option == "Upload Video":
104
  # File uploader for video upload
@@ -124,6 +70,8 @@ def main():
124
 
125
  # Function to perform object tracking on uploaded video
126
  def track_uploaded_video(video_file, stop_button, frame_placeholder):
 
 
127
 
128
  # Create a temporary file to save the uploaded video
129
  temp_video = tempfile.NamedTemporaryFile(delete=False)
 
3
  import numpy as np
4
  import tempfile
5
  import os
6
+ import asyncio
7
  from ultralytics import YOLO
8
+ from streamlit_webrtc import VideoTransformerBase, webrtc_streamer
9
+
10
# Video transformer that runs YOLOv8 object tracking on every incoming
# WebRTC frame and returns the annotated image for display.
class ObjectTrackingTransformer(VideoTransformerBase):
    def __init__(self):
        # Load the YOLOv8 nano model once per transformer instance so it
        # is not reloaded on every frame.
        self.model = YOLO('yolov8n.pt')

    def transform(self, frame):
        """Annotate one incoming av.VideoFrame with tracked detections.

        Returns a BGR ndarray; streamlit_webrtc converts the returned
        array back into a video frame using bgr24 by default, so no
        final RGB conversion is needed.
        """
        # BUG FIX: the previous code used np.array(frame.to_image()),
        # but PyAV's to_image() yields an *RGB* PIL image, so data named
        # "frame_bgr" was actually RGB and every later BGR-assuming step
        # (YOLO inference, plot(), the trailing BGR2RGB conversion) saw
        # swapped channels. Ask PyAV for bgr24 directly instead.
        frame_bgr = frame.to_ndarray(format="bgr24")

        # Downscale to bound per-frame inference cost.
        frame_resized = cv2.resize(frame_bgr, (640, 480))

        # Detect and track objects; persist=True keeps track IDs stable
        # across successive frames.
        results = self.model.track(frame_resized, persist=True)

        # plot() returns the annotated frame as a BGR ndarray, which is
        # exactly what streamlit_webrtc expects back.
        return results[0].plot()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
 
34
  # Streamlit web app
35
  def main():
 
44
 
45
  if option == "Live Stream":
46
  # Start the WebRTC stream with object tracking
47
+ webrtc_streamer(key="live-stream", video_transformer_factory=ObjectTrackingTransformer)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
 
49
  elif option == "Upload Video":
50
  # File uploader for video upload
 
70
 
71
  # Function to perform object tracking on uploaded video
72
  def track_uploaded_video(video_file, stop_button, frame_placeholder):
73
+ # Load YOLOv8 model
74
+ model = YOLO('yolov8n.pt')
75
 
76
  # Create a temporary file to save the uploaded video
77
  temp_video = tempfile.NamedTemporaryFile(delete=False)