Commit 6ee526e
Parent(s): e385e48
fix model size

src/se_extractor.py (+4 -4) CHANGED
@@ -9,6 +9,7 @@ from whisper_timestamped.transcribe import get_audio_tensor, get_vad_segments
 
 # Run on GPU with FP16
 model = None
+model_size = 'medium'
 def split_audio_whisper(audio_path, target_dir='processed',needs_offset=True):
     print("in whisper split")
     model = WhisperModel('medium', device="cuda:0", compute_type="float16")
@@ -156,13 +157,12 @@ def generate_voice_segments(audio_path, target_dir='processed', vad=True):
 
 
 def load_model():
-    model = WhisperModel(
+    model = WhisperModel('medium', device="cpu", compute_type="int8")
 
 
 def extract_segments_to_cut_audio(max_duration,audio_path,target_dir='processed'):
-
-
-    model = WhisperModel(model_size, device="cuda", compute_type="float16")
+
+    model = WhisperModel('medium', device="cuda:0", compute_type="float16")
     audio = AudioSegment.from_file(audio_path)
     max_len = len(audio)
 
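The commit replaces a call that passed an undefined `model_size` variable (and a truncated `WhisperModel(` line in `load_model`) with a module-level `model_size = 'medium'` and explicit model names in each constructor call. Below is a minimal sketch of the resulting pattern, assuming `WhisperModel` comes from faster-whisper (its import sits outside the shown hunks); the `load_model(device)` helper signature and the `example.wav` path are illustrative, not part of the repository.

# Sketch only: mirrors the commit's GPU-FP16 / CPU-int8 split, assuming faster-whisper.
from faster_whisper import WhisperModel

model_size = 'medium'

def load_model(device: str = "cpu") -> WhisperModel:
    # GPU path uses FP16 as in the diff; CPU fallback uses int8 quantization.
    if device.startswith("cuda"):
        return WhisperModel(model_size, device=device, compute_type="float16")
    return WhisperModel(model_size, device="cpu", compute_type="int8")

if __name__ == "__main__":
    model = load_model("cpu")
    # transcribe() returns a lazy generator of segments plus transcription info.
    segments, info = model.transcribe("example.wav", vad_filter=True)
    for seg in segments:
        print(f"[{seg.start:.2f} -> {seg.end:.2f}] {seg.text}")

Hard-coding 'medium' in each call and defining `model_size` at module level both resolve the NameError the old code would raise; keeping the size in one variable, as sketched above, avoids the duplication the diff still carries.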