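"""Gradio app that samples frames from an uploaded baseball video, classifies each
frame with an ImageNet-pretrained MobileNetV2, and reports whether the runner is
out or safe based on the predicted labels."""
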
import gradio as gr
import tensorflow as tf
import numpy as np
import cv2
from PIL import Image

# Load MobileNetV2 pretrained on ImageNet; its labels are used to classify frames.
model = tf.keras.applications.MobileNetV2(weights="imagenet")


def preprocess_image(image):
    """Resize a PIL image to 224x224 and scale pixels for MobileNetV2."""
    img = np.array(image)
    img = cv2.resize(img, (224, 224))
    img = tf.keras.applications.mobilenet_v2.preprocess_input(img)
    return np.expand_dims(img, axis=0)  # add a batch dimension


def classify_frame(frame):
    """Return the top-1 ImageNet label name for a single frame."""
    processed_frame = preprocess_image(frame)
    predictions = model.predict(processed_frame)
    # decode_predictions returns, per sample, a list of (class_id, label, score) tuples.
    decoded_predictions = tf.keras.applications.mobilenet_v2.decode_predictions(predictions, top=1)[0]
    return decoded_predictions[0][1]  # label name of the top prediction


def process_video(video):
    result = ""
    cap = cv2.VideoCapture(video)
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # Sample roughly 10 evenly spaced frames; guard against a zero step for short clips.
    frame_interval = max(1, frame_count // 10)

    for i in range(0, frame_count, frame_interval):
        cap.set(cv2.CAP_PROP_POS_FRAMES, i)
        ret, frame = cap.read()
        if not ret:
            break

        # OpenCV reads frames as BGR; convert to RGB before building a PIL image.
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(frame_rgb)
        label = classify_frame(image)

        # Naive heuristic: any baseball-related label is treated as "runner out".
        if "baseball" in label.lower():
            result = "The runner is out"
            break

    cap.release()
    if result == "":
        result = "The runner is safe"

    return result


iface = gr.Interface(
    fn=process_video,
    # In recent Gradio versions, gr.Video() replaces the deprecated gr.inputs.Video
    # and passes the uploaded file's path to fn.
    inputs=gr.Video(),
    outputs="text",
    title="Baseball Runner Status",
    description="Upload a baseball video to determine if the runner is out or safe.",
)


if __name__ == "__main__":
    iface.launch()
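
# To try the demo (assuming gradio, tensorflow, opencv-python, and pillow are installed):
#   python app.py          # or whatever name the script is saved under
# Gradio then serves the interface locally, by default at http://127.0.0.1:7860.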