Update app.py
app.py CHANGED
@@ -1,10 +1,14 @@
 import av
+import os
 import torch
 import tempfile
 import shutil
 import atexit
+import subprocess
 import gradio as gr

+from convert import convert_video
+

 def get_video_length_av(video_path):
     with av.open(video_path) as container:
@@ -46,6 +50,36 @@ def cleanup_temp_directories():
             print(f"Could not delete directory {temp_dir}")


+def ffmpeg_remux_audio(source_video_path, dest_video_path, output_path):
+    # Build the ffmpeg command to extract audio and remux into another video
+    command = [
+        "ffmpeg",
+        "-i",
+        dest_video_path,  # Input destination video file
+        "-i",
+        source_video_path,  # Input source video file (for the audio)
+        "-c:v",
+        "copy",  # Copy the video stream as is
+        "-c:a",
+        "copy",  # Copy the audio stream as is
+        "-map",
+        "0:v:0",  # Map the video stream from the destination file
+        "-map",
+        "1:a:0",  # Map the audio stream from the source file
+        output_path,  # Specify the output file path
+    ]
+
+    try:
+        # Run the ffmpeg command
+        subprocess.run(command, check=True)
+    except subprocess.CalledProcessError as e:
+        # Handle errors during the subprocess execution
+        print(f"An error occurred: {e}")
+        return dest_video_path
+
+    return output_path
+
+
 def inference(video):
     if get_video_length_av(video) > 30:
         raise gr.Error("Length of video cannot be over 30 seconds")
@@ -54,15 +88,13 @@ def inference(video):

     temp_dir = tempfile.mkdtemp()
     temp_directories.append(temp_dir)
+    output_composition = temp_dir + "/matted_video.mp4"

     convert_video(
         model,  # The loaded model, can be on any device (cpu or cuda).
         input_source=video,  # A video file or an image sequence directory.
         downsample_ratio=0.25,  # [Optional] If None, make downsampled max size be 512px.
-
-        output_composition=(
-            temp_dir + "/matted_video.mp4"
-        ),  # File path if video; directory path if png sequence.
+        output_composition=output_composition,  # File path if video; directory path if png sequence.
         output_alpha=None,  # [Optional] Output the raw alpha prediction.
         output_foreground=None,  # [Optional] Output the raw foreground prediction.
         output_video_mbps=4,  # Output video mbps. Not needed for png sequence.
@@ -70,7 +102,10 @@ def inference(video):
         num_workers=1,  # Only for image sequence input. Reader threads.
         progress=True,  # Print conversion progress.
     )
-
+
+    resulting_video = f"{temp_dir}/matted_{os.path.split(video)[1]}"
+
+    return ffmpeg_remux_audio(video, output_composition, resulting_video)


 if __name__ == "__main__":
@@ -78,7 +113,6 @@ if __name__ == "__main__":
     atexit.register(cleanup_temp_directories)

     model = torch.hub.load("PeterL1n/RobustVideoMatting", "mobilenetv3")
-    convert_video = torch.hub.load("PeterL1n/RobustVideoMatting", "converter")

     if torch.cuda.is_available():
         free_memory = get_free_memory_gb()
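
For context, the new ffmpeg_remux_audio helper shells out to ffmpeg to copy the video stream of the matted output and the audio stream of the original upload into a single file, without re-encoding either stream, and falls back to the silent matted file if ffmpeg fails. A minimal sketch of exercising it outside the Gradio app is below; it assumes app.py's module-level imports (torch, av, gradio, convert) resolve, and the three file paths are placeholder examples, not files that ship with the Space.

    # Hypothetical standalone usage of the helper added in this commit.
    from app import ffmpeg_remux_audio

    # Remux: video stream from the matted file, audio stream from the original.
    # Returns "matted_with_audio.mp4" on success, or "matted.mp4" if ffmpeg errors.
    result = ffmpeg_remux_audio(
        source_video_path="original.mp4",        # placeholder: upload that still has audio
        dest_video_path="matted.mp4",            # placeholder: matted output without audio
        output_path="matted_with_audio.mp4",     # placeholder: remuxed result
    )
    print(result)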