Spaces: curryporkchop committed "Added ASL Gesture project files"

Files changed:
- .gitattributes +2 -0
- app.py +339 -0
- gestureReference.png +3 -0
- gesture_recognizer.task +3 -0
- requirement.txt +5 -0
- utils.py +11 -0
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+gesture_recognizer.task filter=lfs diff=lfs merge=lfs -text
+gestureReference.png filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,339 @@
# without hand landmarking
# import streamlit as st
# import mediapipe as mp
# import cv2
# import os
# import time
# from queue import Queue

# # Import necessary components from MediaPipe
# BaseOptions = mp.tasks.BaseOptions
# GestureRecognizer = mp.tasks.vision.GestureRecognizer
# GestureRecognizerOptions = mp.tasks.vision.GestureRecognizerOptions
# GestureRecognizerResult = mp.tasks.vision.GestureRecognizerResult
# VisionRunningMode = mp.tasks.vision.RunningMode

# # Correct path to the Gesture Recognizer model file
# model_path = 'model/gesture_recognizer.task'

# # Check if file exists
# if not os.path.exists(model_path):
#     raise FileNotFoundError(f"Model file not found at {model_path}")

# # Queue to share results between the callback and main thread
# gesture_queue = Queue()

# # Callback function to process results and add them to the queue
# def print_result(result: GestureRecognizerResult, output_image: mp.Image, timestamp_ms: int):
#     results = []  # Collect gesture results
#     if result.gestures:
#         for hand_gestures in result.gestures:
#             for gesture in hand_gestures:
#                 results.append(f"Gesture: **{gesture.category_name}**, Confidence: **{gesture.score:.2f}**")
#     else:
#         results.append("No gestures detected.")
#     gesture_queue.put(results)

# # Configure the Gesture Recognizer
# options = GestureRecognizerOptions(
#     base_options=BaseOptions(model_asset_path=model_path),
#     running_mode=VisionRunningMode.LIVE_STREAM,
#     result_callback=print_result
# )

# # Custom App Header
# st.markdown("<h1 style='text-align: center; color: #4CAF50;'>Gesture Recognition App 🚀</h1>", unsafe_allow_html=True)
# st.markdown("<p style='text-align: center; color: grey;'>Recognize hand gestures in real time with MediaPipe and Streamlit</p>", unsafe_allow_html=True)

# # Sidebar for User Controls
# st.sidebar.title("Control Panel")
# run_app = st.sidebar.button("Start Gesture Recognition")
# st.sidebar.write("Toggle the button above to start the app.")

# # Placeholders for video feed and results
# video_placeholder = st.empty()  # Placeholder for the video feed
# result_placeholder = st.empty()  # Placeholder for gesture results

# # Footer with branding
# st.sidebar.markdown(
#     "<hr><p style='text-align: center;'>Made with ❤️ using Streamlit</p>", unsafe_allow_html=True
# )

# if run_app:
#     st.markdown("<h2 style='text-align: center;'>Processing Video Feed...</h2>", unsafe_allow_html=True)
#     cap = cv2.VideoCapture(0)

#     # Initialize a monotonically increasing timestamp
#     start_time = time.time()

#     with GestureRecognizer.create_from_options(options) as recognizer:
#         while cap.isOpened():
#             success, frame = cap.read()
#             if not success:
#                 st.warning("No frames available from the video feed.")
#                 break

#             # Compute the current timestamp in milliseconds
#             current_time_ms = int((time.time() - start_time) * 1000)

#             # Convert frame to a MediaPipe Image
#             mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=frame)

#             # Perform gesture recognition asynchronously
#             recognizer.recognize_async(mp_image, current_time_ms)

#             # Display the frame in Streamlit
#             frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
#             video_placeholder.image(frame_rgb, channels="RGB", caption="Gesture Recognition", use_column_width=True)

#             # Retrieve and display gesture results from the queue
#             while not gesture_queue.empty():
#                 results = gesture_queue.get()
#                 result_placeholder.markdown(
#                     "<h3 style='text-align: center; color: #FF5722;'>Detected Gestures</h3>",
#                     unsafe_allow_html=True,
#                 )
#                 result_placeholder.markdown(
#                     "<ul>" + "".join([f"<li>{result}</li>" for result in results]) + "</ul>",
#                     unsafe_allow_html=True,
#                 )

#     cap.release()

# with hand landmarks
import streamlit as st
import mediapipe as mp
import cv2
import os
import time
from queue import Queue
from utils import display_gesture_chart

# Initialize MediaPipe Hands for hand landmark detection
mp_hands = mp.solutions.hands
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles

# Import necessary components from the MediaPipe Gesture Recognizer
BaseOptions = mp.tasks.BaseOptions
GestureRecognizer = mp.tasks.vision.GestureRecognizer
GestureRecognizerOptions = mp.tasks.vision.GestureRecognizerOptions
GestureRecognizerResult = mp.tasks.vision.GestureRecognizerResult
VisionRunningMode = mp.tasks.vision.RunningMode

# Path to the Gesture Recognizer model file (this commit adds it at the repo root)
model_path = 'gesture_recognizer.task'

# Check if the model file exists
if not os.path.exists(model_path):
    raise FileNotFoundError(f"Model file not found at {model_path}")

# Queue to share results between the recognizer callback and the main thread
gesture_queue = Queue()

# Callback function to process gesture results and add them to the queue
def print_result(result: GestureRecognizerResult, output_image: mp.Image, timestamp_ms: int):
    results = []  # Collect gesture results
    if result.gestures:
        for hand_gestures in result.gestures:
            for gesture in hand_gestures:
                results.append(f"{gesture.category_name} (Confidence: {gesture.score:.2f})")  # Include confidence
    else:
        results.append("No gestures detected.")
    gesture_queue.put(results)

# Configure the Gesture Recognizer
options = GestureRecognizerOptions(
    base_options=BaseOptions(model_asset_path=model_path),
    running_mode=VisionRunningMode.LIVE_STREAM,
    result_callback=print_result
)

# Initialize session state for saving gestures
if "recognized_gestures" not in st.session_state:
    st.session_state.recognized_gestures = []

# Custom App Header
st.markdown(
    """
    <style>
    .header {text-align: center; color: #4CAF50; margin-top: -50px;}
    .description {text-align: center; color: grey; font-size: 16px;}
    </style>
    <h1 class="header">Gesture & Hand Landmark Detection 🚀</h1>
    <p class="description">Recognize and save hand gestures in real time with MediaPipe.</p>
    """,
    unsafe_allow_html=True,
)

# Sidebar for User Controls
st.sidebar.title("Control Panel")
st.sidebar.markdown("<hr>", unsafe_allow_html=True)

# Display the gesture reference chart in the sidebar
gesture_chart_path = "./gestureReference.png"  # Reference chart added at the repo root in this commit
display_gesture_chart(gesture_chart_path)

max_num_hands = st.sidebar.slider("Max Number of Hands", 1, 2, 1)
skip_frames = st.sidebar.slider("Process Every Nth Frame", 1, 10, 5)
resolution = st.sidebar.selectbox("Frame Resolution", ["320x240", "640x480"], index=0)

st.sidebar.markdown("<hr>", unsafe_allow_html=True)

# Start and Stop buttons
if "run_app" not in st.session_state:
    st.session_state.run_app = False

col1, col2 = st.sidebar.columns(2)
if col1.button("▶ Start"):
    st.session_state.run_app = True

if col2.button("⏹ Stop"):
    st.session_state.run_app = False

# Clear history button
if st.sidebar.button("🗑️ Clear History"):
    st.session_state.recognized_gestures = []

# Layout with columns: live camera feed on the left, gesture log on the right
col_feed, col_log = st.columns([5, 2])

with col_feed:
    st.markdown("### Live Camera Feed")
    video_placeholder = st.empty()  # Placeholder for the video feed

with col_log:
    st.markdown("### Gesture Log")
    current_gesture_box = st.empty()  # Box to display the most recent gesture dynamically
    st.markdown("### Gesture History")
    gesture_history_box = st.empty()  # Box to display all recognized gestures dynamically

# Footer with branding
st.sidebar.markdown(
    """
    <style>
    .footer {text-align: center; font-size: 12px; color: grey; margin-top: 20px;}
    </style>
    <p class="footer">Made by Marco Chen, William Taka, Rigoberto Ponce using Streamlit, MediaPipe & OpenCV</p>
    """,
    unsafe_allow_html=True,
)

if st.session_state.run_app:
    cap = cv2.VideoCapture(0)

    # Parse resolution
    res_width, res_height = map(int, resolution.split("x"))

    # Initialize a monotonically increasing timestamp
    start_time = time.time()

    with GestureRecognizer.create_from_options(options) as recognizer, mp_hands.Hands(
        max_num_hands=max_num_hands,
        model_complexity=1,  # 0 = faster, 1 = more accurate
        min_detection_confidence=0.5,
        min_tracking_confidence=0.5
    ) as hands:
        frame_count = 0
        while st.session_state.run_app and cap.isOpened():
            success, frame = cap.read()
            if not success:
                st.warning("No frames available from the video feed.")
                break

            frame_count += 1
            if frame_count % skip_frames != 0:
                continue

            # Flip and resize the frame
            frame = cv2.flip(frame, 1)
            frame = cv2.resize(frame, (res_width, res_height))
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # Perform hand landmark detection
            hand_results = hands.process(frame_rgb)

            # Perform gesture recognition (SRGB expects RGB data; OpenCV frames are BGR)
            mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=frame_rgb)
            current_time_ms = int((time.time() - start_time) * 1000)
            recognizer.recognize_async(mp_image, current_time_ms)

            # Draw hand landmarks on the frame
            if hand_results.multi_hand_landmarks:
                for hand_landmarks in hand_results.multi_hand_landmarks:
                    mp_drawing.draw_landmarks(
                        frame,
                        hand_landmarks,
                        mp_hands.HAND_CONNECTIONS,
                        mp_drawing_styles.get_default_hand_landmarks_style(),
                        mp_drawing_styles.get_default_hand_connections_style(),
                    )

            # Retrieve and display gesture results from the queue
            while not gesture_queue.empty():
                results = gesture_queue.get()
                if results:
                    new_gesture = results[-1]

                    # Extract label and confidence safely
                    if " (Confidence: " in new_gesture:
                        label, confidence = new_gesture.split(" (Confidence: ")
                        confidence = confidence.rstrip(")")  # Remove the trailing parenthesis
                    else:
                        label = new_gesture
                        confidence = "N/A"

                    # Add the new gesture to history only if it's not already logged
                    if label.isalpha() and new_gesture not in st.session_state.recognized_gestures:
                        st.session_state.recognized_gestures.append(new_gesture)

                    # Update the current gesture display
                    current_gesture_box.markdown(
                        f"<h4 style='text-align: center; color: #4CAF50;'>Gesture: {label}<br>Confidence: {confidence}</h4>",
                        unsafe_allow_html=True,
                    )

                    # Update the gesture history display
                    gesture_history_box.text_area(
                        "Gesture History:",
                        value="\n".join(reversed(st.session_state.recognized_gestures)),
                        height=300,
                        disabled=True,
                    )

            # Display the frame with hand landmarks and gesture results
            video_placeholder.image(frame, channels="BGR", caption="Gesture & Hand Landmark Detection", use_column_width=True)

    cap.release()
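Because app.py hard-fails when the model file is missing, it can be worth sanity-checking gesture_recognizer.task outside the Streamlit loop first. A minimal sketch (not part of this commit) using the same MediaPipe Tasks API in synchronous IMAGE mode; 'hand.jpg' is a hypothetical test photo:

import mediapipe as mp

BaseOptions = mp.tasks.BaseOptions
GestureRecognizer = mp.tasks.vision.GestureRecognizer
GestureRecognizerOptions = mp.tasks.vision.GestureRecognizerOptions
VisionRunningMode = mp.tasks.vision.RunningMode

# IMAGE mode is synchronous: recognize() returns the result directly,
# so no callback, queue, or timestamps are needed.
options = GestureRecognizerOptions(
    base_options=BaseOptions(model_asset_path='gesture_recognizer.task'),
    running_mode=VisionRunningMode.IMAGE,
)

with GestureRecognizer.create_from_options(options) as recognizer:
    image = mp.Image.create_from_file('hand.jpg')  # hypothetical test image
    result = recognizer.recognize(image)
    for hand_gestures in result.gestures:
        for gesture in hand_gestures:
            print(f"{gesture.category_name} ({gesture.score:.2f})")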
gestureReference.png ADDED
Binary image, tracked with Git LFS.
gesture_recognizer.task ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:13aa0bd33974391c1b8d31e5cf77e29df346f3633781c49d7d307bdf8f2de85b
size 8764485
requirement.txt ADDED
@@ -0,0 +1,5 @@
streamlit
mediapipe
opencv-python
pillow
numpy
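Note: pip's convention, and the filename Hugging Face Spaces scans for, is requirements.txt (plural); with the file named requirement.txt the Space will likely not install these dependencies automatically, so renaming it is probably the safer choice.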
utils.py ADDED
@@ -0,0 +1,11 @@
import streamlit as st

def display_gesture_chart(image_path):
    """
    Displays the gesture chart in the sidebar.

    Args:
        image_path (str): Path to the gesture chart image.
    """
    st.sidebar.markdown("### Gesture Reference")
    st.sidebar.image(image_path, caption="ASL Gesture Chart", use_column_width=True)