# (Removed non-code extraction artifacts that preceded this script:
#  a "File size" header, stray git commit hashes, and a 1-73 line-number
#  column left over from a copy/paste. None of it was program content.)
import cv2
import gradio as gr
import google.generativeai as genai
import os
import PIL.Image

# Configure the API key for Google Generative AI.
# NOTE(review): if GOOGLE_API_KEY is unset, os.environ.get returns None and
# configuration silently proceeds — the first API call will fail instead; confirm
# whether a fail-fast check is wanted here.
genai.configure(api_key=os.environ.get("GOOGLE_API_KEY"))

# Define the Generative AI model used for all frame analysis below.
model = genai.GenerativeModel('gemini-1.5-flash')

# Function to capture frames from a video
def frame_capture(video_path, num_frames=5):
    """Sample up to ``num_frames`` evenly spaced frames from a video.

    Args:
        video_path: Path to a video file readable by OpenCV.
        num_frames: Maximum number of frames to extract (default 5).

    Returns:
        List of BGR image arrays as returned by ``VideoCapture.read``.
        May be shorter than ``num_frames`` (or empty) if the video cannot
        be opened or runs out of readable frames.
    """
    vidObj = cv2.VideoCapture(video_path)
    frames = []
    # Guard against a capture that failed to open (bad path / codec):
    # without this the loop below would just churn on failed reads.
    if not vidObj.isOpened():
        vidObj.release()
        return frames
    try:
        total_frames = int(vidObj.get(cv2.CAP_PROP_FRAME_COUNT))
        # Even spacing across the clip; at least 1 so we always advance.
        frame_step = max(1, total_frames // num_frames)
        count = 0
        while len(frames) < num_frames:
            vidObj.set(cv2.CAP_PROP_POS_FRAMES, count)
            success, image = vidObj.read()
            if not success:
                break
            frames.append(image)
            count += frame_step
    finally:
        # Release the capture even if a read/seek raises — the original
        # leaked the handle on any exception.
        vidObj.release()
    return frames

# Function to generate text descriptions for frames or answer a specific question
def analyze_video(video_path, user_question):
    frames = frame_capture(video_path)
    images = [PIL.Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) for frame in frames]
    
    if user_question.strip():
        prompt = f"Based on these video frames, {user_question}"
    else:
        prompt = "Describe what is happening in each of these frames in this video sequentially."
    
    images_with_prompt = [prompt] + images
    
    responses = model.generate_content(images_with_prompt)
    descriptions = [response.text for response in responses]
    
    return descriptions[-1] if user_question.strip() else format_descriptions(descriptions)

# Helper function to format descriptions
def format_descriptions(descriptions):
    """Collapse a sequence of description strings into one string.

    The pieces are glued together with single spaces and any leading or
    trailing whitespace on the combined result is removed.
    """
    combined = " ".join(descriptions)
    return combined.strip()

# Function to handle chat interaction
def chat_interaction(video_path, chatbot, user_message):
    """Run one chat turn: analyze the video with the user's message.

    Appends the (question, answer) pair to the chat history in place and
    returns an empty string (clears the input textbox) plus the updated
    history for Gradio to re-render.
    """
    answer = analyze_video(video_path, user_message)
    chatbot.append((user_message, answer))
    return "", chatbot

# Define Gradio interface.
# Components are instantiated up front and then .render()-ed inside the
# Blocks layout below; creating them here keeps the layout section terse.
video_input = gr.Video(label="Upload Video", autoplay=True)
chatbot = gr.Chatbot(label="Video Analysis Chatbot")
user_input = gr.Textbox(label="Ask something specific about the video", placeholder="E.g., Are there any cars in this video?")

# Create Gradio app: a single column with video on top, chat history in the
# middle, and the question box at the bottom.
with gr.Blocks() as demo:
    with gr.Column():
        video_input.render()
        chatbot.render()
        user_input.render()
        # Pressing Enter in the textbox runs chat_interaction; the returned
        # "" clears the textbox and the updated list refreshes the chatbot.
        user_input.submit(fn=chat_interaction, inputs=[video_input, chatbot, user_input], outputs=[user_input, chatbot])

demo.launch()