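"""Streamlit app that colorizes black-and-white videos frame by frame using
OpenCV's DNN module and the pre-trained Colorful Image Colorization model
(Zhang et al., ECCV 2016)."""
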
import streamlit as st
import numpy as np
import cv2
import tempfile
import os
# Load model files
prototxt_path = "colorization_deploy_v2.prototxt"
model_path = "colorization_release_v2.caffemodel"
kernel_path = "pts_in_hull.npy"
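# colorization_deploy_v2.prototxt defines the network architecture,
# colorization_release_v2.caffemodel holds the trained weights, and
# pts_in_hull.npy stores the 313 quantized ab color bin centers the
# network predicts over.
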
# Streamlit app title
st.title("Video Colorization App")
# File upload
uploaded_video = st.file_uploader("Upload a black and white video", type=["mp4", "avi"])
if uploaded_video is not None:
    # Save the uploaded video to a temporary file so OpenCV can read it from disk
    tfile = tempfile.NamedTemporaryFile(delete=False)
    tfile.write(uploaded_video.read())
    video_path = tfile.name

    # Output path for the colorized video
    output_path = os.path.join(tempfile.gettempdir(), "colorized_video.mp4")

    # Load the pre-trained colorization network and its ab cluster centers
    net = cv2.dnn.readNetFromCaffe(prototxt_path, model_path)
    points = np.load(kernel_path)
    points = points.transpose().reshape(2, 313, 1, 1)
    net.getLayer(net.getLayerId("class8_ab")).blobs = [points.astype(np.float32)]
    net.getLayer(net.getLayerId("conv8_313_rh")).blobs = [np.full([1, 313], 2.606, dtype="float32")]
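
    # The two blob assignments above follow the standard OpenCV colorization recipe:
    # "class8_ab" is loaded with the 313 ab bin centers from pts_in_hull.npy, and
    # "conv8_313_rh" is a scale layer whose constant (2.606 in the reference
    # implementation) rescales the network's softmax output before decoding.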
    # Open the video file
    cap = cv2.VideoCapture(video_path)

    # Get video properties
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # Create a VideoWriter object to save the colorized video
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))

    # Initialize progress bar and frame counter
    frame_count = 0
    progress_bar = st.progress(0)
    progress_text = st.empty()  # Placeholder for frame count text
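
    # Per-frame pipeline: convert the frame to LAB, feed the mean-centred 224x224
    # L channel to the network, upsample the predicted ab channels back to the
    # original resolution, and recombine them with the full-resolution L channel.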
    # Process each frame
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame_count += 1
        progress_text.text(f"Processing frame {frame_count} of {total_frames}")

        # Convert frame to LAB color space and preprocess
        normalized = frame.astype("float32") / 255.0
        lab = cv2.cvtColor(normalized, cv2.COLOR_BGR2LAB)
        resized = cv2.resize(lab, (224, 224))
        L = cv2.split(resized)[0]
        L -= 50  # mean-centre the L channel, as in the reference colorization pipeline

        # Set the input and get the predicted ab channels
        net.setInput(cv2.dnn.blobFromImage(L))
        ab = net.forward()[0, :, :, :].transpose((1, 2, 0))
        ab = cv2.resize(ab, (frame.shape[1], frame.shape[0]))

        # Combine with the full-resolution L channel and convert back to BGR
        L = cv2.split(lab)[0]
        colorized = np.concatenate((L[:, :, np.newaxis], ab), axis=2)
        colorized = cv2.cvtColor(colorized, cv2.COLOR_LAB2BGR)
        colorized = np.clip(colorized, 0, 1)  # avoid overflow when scaling to uint8
        colorized = (255 * colorized).astype("uint8")

        # Update the progress bar (clamped, since CAP_PROP_FRAME_COUNT is an estimate)
        progress_bar.progress(min(frame_count / total_frames, 1.0))

        # Write the colorized frame to the output video
        out.write(colorized)
    # Release resources
    cap.release()
    out.release()

    # Display the colorized video
    st.success("Video colorization completed!")
    st.video(output_path)

    # Provide a download link for the colorized video
    with open(output_path, "rb") as file:
        st.download_button(
            label="Download Colorized Video",
            data=file,
            file_name="colorized_video.mp4",
            mime="video/mp4",
        )
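
# To try this locally (assuming the three model files above sit next to this
# script): pip install streamlit opencv-python numpy, then run
#   streamlit run app.py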