create app.py
app.py
ADDED
# Dependencies. On a Hugging Face Space these belong in requirements.txt;
# the "!pip" notebook syntax is not valid in a plain app.py. To install locally:
#   pip install -U openai-whisper
#   pip install -U git+https://github.com/linto-ai/whisper-timestamped
#   pip install gradio moviepy whisper-timestamped

import os
import random
import datetime as dt
import json

import whisper_timestamped as whisper
from moviepy.video.io.VideoFileClip import VideoFileClip
import gradio as gr

# Helper functions and global variables.
# Name the output directory after the current timestamp; append random digits if it already exists.
outdir = dt.datetime.now().strftime("%Y%m%d%H%M")
if os.path.exists(outdir):
    random_digits = str(random.randint(1000, 9999))
    new_outdir = outdir + random_digits
    os.mkdir(new_outdir)
    outdir = new_outdir
    print("Created new output directory:", new_outdir)
else:
    os.mkdir(outdir)
    print("Created output directory:", outdir)

# Load the Whisper model once at startup ("base" keeps download size and memory modest).
model = whisper.load_model("base")

def generate_timestamps(vidname):
    """Transcribe the video's audio track and return the timestamped result dict."""
    audio = whisper.load_audio(vidname)
    result = whisper.transcribe(model, audio, language="en")
    return result

def get_segment_info(data):
    """Reduce each transcript segment to its id, start, end, and text fields."""
    new_list = []
    for segment in data.get("segments", []):
        if "id" in segment and "start" in segment and "end" in segment and "text" in segment:
            new_item = {
                "id": segment["id"],
                "start": segment["start"],
                "end": segment["end"],
                "text": segment["text"]
            }
            new_list.append(new_item)
    return new_list

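# For reference, whisper.transcribe() returns a dict shaped roughly like (values invented):
#   {"text": "...", "segments": [{"id": 0, "start": 0.0, "end": 2.1,
#    "text": " Hello there.", "words": [...]}, ...]}
# get_segment_info() keeps only the four fields used downstream and drops the rest.
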
def combine_entries(entries):
    """Merge consecutive transcript segments into chunks of roughly 30 seconds."""
    combined_entries = []
    current_entry = None
    total_duration = 0

    for entry in entries:
        entry_duration = entry["end"] - entry["start"]

        if total_duration + entry_duration > 30:
            # This entry would push the running chunk past 30 s: flush the chunk
            # (note its end is stretched to this entry's end) and start a new one.
            if current_entry:
                current_entry["end"] = entry["end"]
                combined_entries.append(current_entry)

            current_entry = {
                "start": entry["start"],
                "end": entry["end"],
                "text": entry["text"]
            }
            total_duration = entry_duration
        else:
            if current_entry:
                # Still under budget: extend the current chunk with this entry.
                current_entry["end"] = entry["end"]
                current_entry["text"] += " " + entry["text"]
                total_duration += entry_duration
            else:
                # Very first entry: open the first chunk.
                current_entry = {
                    "start": entry["start"],
                    "end": entry["end"],
                    "text": entry["text"]
                }
                total_duration = entry_duration

    if current_entry:
        combined_entries.append(current_entry)

    return combined_entries

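# combine_entries worked example (invented values): three segments spanning 0-12 s,
# 12-23 s, and 23-33 s yield two chunks. The first two merge (running total 23 s);
# the third would raise the total to 33 s, so the accumulated chunk is flushed with
# its end stretched to 33 s and the third segment becomes its own 23-33 s chunk.
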
def extract_video_segment(input_video, output_video, start_time, end_time):
    """Write the start_time-end_time portion of input_video to output_video."""
    # subclip is the moviepy 1.x API (moviepy 2.x renamed it to subclipped).
    video_clip = VideoFileClip(input_video).subclip(start_time, end_time)
    video_clip.write_videofile(output_video, codec="libx264", audio_codec="aac")
    video_clip.close()

def save_segments(outdir, name, combined_entries):
    """Cut the source video into one clip per combined entry (output_segment_<n>.mp4)."""
    segments = combined_entries
    input_video = name
    for i, segment in enumerate(segments):
        start_time = segment['start']
        end_time = segment['end']
        output_video_file = f'{outdir}/output_segment_{i + 1}.mp4'
        extract_video_segment(input_video, output_video_file, start_time, end_time)

def split_up_video(video_path, output_dir):
    """Transcribe, chunk, and cut a video, then zip the clips and transcript."""
    result = generate_timestamps(video_path)
    combined_entries = combine_entries(get_segment_info(result))

    # Save the combined transcript alongside the clips.
    with open(f"{output_dir}/transcript.txt", "w") as scribeout:
        scribeout.write(json.dumps(combined_entries, indent=2, ensure_ascii=False))

    save_segments(output_dir, video_path, combined_entries)

    # Zip everything up (relies on the zip CLI being available on the host).
    filename, extension = os.path.splitext(video_path)
    os.system(f"zip -r {filename}.zip {output_dir}")

    return f"{filename}.zip"

# Gradio interface
def process_video(video):
    output_dir = dt.datetime.now().strftime("%Y%m%d%H%M")
    os.makedirs(output_dir, exist_ok=True)
    # With type="filepath", Gradio passes the uploaded file's path as a plain string.
    video_path = video
    output_zip = split_up_video(video_path, output_dir)
    return output_zip

iface = gr.Interface(
    fn=process_video,
    inputs=gr.File(file_count="single", type="filepath", label="Upload a Video"),
    outputs=gr.File(label="Download Zipped Segments and Transcript"),
    title="Video Splitter",
    description="Upload a video and get a zipped file with segmented videos and a transcript."
)

iface.launch()
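
# To try it locally (assuming the dependencies listed at the top are installed):
#   python app.py
# then open the local URL Gradio prints. On a Hugging Face Space, app.py is launched automatically.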