# Sign Language Translator with Streamlit
import streamlit as st
import cv2
import numpy as np
import mediapipe as mp
# Load Mediapipe Hands model
mp_hands = mp.solutions.hands
hands = mp_hands.Hands()

# Function to process the webcam feed with OpenCV and Mediapipe
def process_webcam():
    cap = cv2.VideoCapture(0)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # Convert the frame to RGB for Mediapipe
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = hands.process(frame_rgb)
        # Draw hand landmarks on the original BGR frame
        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                mp.solutions.drawing_utils.draw_landmarks(frame, hand_landmarks, mp_hands.HAND_CONNECTIONS)
        # Display the frame in a native OpenCV window (only works when running locally); press 'q' to quit
        cv2.imshow('Webcam', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
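
# Note: cv2.imshow opens a window on the machine running the script, so it only works for
# local runs. Below is a minimal sketch of a Streamlit-native alternative that renders the
# annotated frames into the page via an st.empty() placeholder. The stop button and this
# function are illustrative assumptions, not part of the original app.
def process_webcam_streamlit():
    frame_placeholder = st.empty()
    stop = st.button("Stop Webcam", key="stop_webcam")  # assumed control, not in the original UI
    cap = cv2.VideoCapture(0)
    while cap.isOpened() and not stop:
        ret, frame = cap.read()
        if not ret:
            break
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = hands.process(frame_rgb)
        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                mp.solutions.drawing_utils.draw_landmarks(frame_rgb, hand_landmarks, mp_hands.HAND_CONNECTIONS)
        # Render the annotated RGB frame directly in the Streamlit page
        frame_placeholder.image(frame_rgb, channels="RGB")
    cap.release()
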
# Streamlit UI
st.title("Sign Language Translator")
st.write("This application translates sign language into Farsi and provides visual aids for understanding.")

if st.button("Open Webcam"):
    st.write("Opening webcam...")
    process_webcam()

# Placeholder for translation logic
def translate_sign_language(hand_gesture):
    # Placeholder for actual translation logic
    return "Translated Text in Farsi"

# Placeholder for visual aid
def show_visual_aid(hand_gesture):
    # Placeholder for actual visual aid logic
    st.image("path_to_hand_shape_image.png", caption="Hand Shape for Sign Language")

# User input for non-sign-language users
user_input = st.text_input("Enter text if you do not know sign language:")
if user_input:
    st.write("You entered:", user_input)
    # Show a visual aid for the entered text
    show_visual_aid(user_input)