import cv2
import gradio as gr
import google.generativeai as genai
import os
import PIL.Image
# Configure the API key for Google Generative AI
genai.configure(api_key=os.environ.get("GOOGLE_API_KEY"))
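# Fail-fast guard (an added sketch, not part of the original app): surface a
# missing key immediately instead of letting the first API call error out.
if not os.environ.get("GOOGLE_API_KEY"):
    raise RuntimeError("Set the GOOGLE_API_KEY environment variable before launching the app.")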
# Define the Generative AI model
model = genai.GenerativeModel('gemini-1.5-flash')
# Function to capture frames from a video
def frame_capture(video_path, num_frames=5):
    vidObj = cv2.VideoCapture(video_path)
    frames = []
    total_frames = int(vidObj.get(cv2.CAP_PROP_FRAME_COUNT))
    # Step size that spreads the sampled frames across the whole clip
    frame_step = max(1, total_frames // num_frames)
    count = 0
    while len(frames) < num_frames:
        # Seek directly to the target frame instead of decoding every frame
        vidObj.set(cv2.CAP_PROP_POS_FRAMES, count)
        success, image = vidObj.read()
        if not success:
            break
        frames.append(image)  # OpenCV returns BGR uint8 arrays
        count += frame_step
    vidObj.release()
    return frames
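# Usage sketch (illustrative only; "sample.mp4" is a placeholder path):
#   frames = frame_capture("sample.mp4", num_frames=5)
#   -> a list of up to 5 BGR numpy arrays sampled evenly across the clip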
# Function to generate text descriptions for frames
def generate_descriptions_for_frames(video_path):
    frames = frame_capture(video_path)
    # Convert BGR (OpenCV) to RGB before handing the frames to PIL
    images = [PIL.Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) for frame in frames]
    prompt = "Describe what is happening in each of these frames."
    # The Gemini API accepts a mixed list of text and PIL images
    images_with_prompt = [prompt] + images
    # generate_content returns a single response object, not an iterable of
    # responses; pull the generated text out of its parts
    response = model.generate_content(images_with_prompt)
    descriptions = [part.text for part in response.parts]
    return format_descriptions(descriptions)
# Helper function to format descriptions
def format_descriptions(descriptions):
    return ' '.join(descriptions).strip()
# Define Gradio interface
# Gradio 4.x: `sources` replaces the removed `source`/`type` arguments;
# gr.Video hands the callback a filepath by default
video_input = gr.Video(label="Upload or Record Video", sources=["upload", "webcam"])
output_text = gr.Textbox(label="Video Analysis")
# Create and launch the Gradio app
demo = gr.Interface(
    fn=generate_descriptions_for_frames,
    inputs=video_input,
    outputs=output_text,
    title="Video Content Detection System",
)

if __name__ == "__main__":
    demo.launch()