Update custom_interface_app.py
custom_interface_app.py  CHANGED  (+6 -6)
@@ -137,7 +137,7 @@ class ASR(Pretrained):
         # Get audio length in seconds
         audio_length = len(waveform) / sr
 
-        if audio_length >=
+        if audio_length >= 20:
             print(f"Audio is too long ({audio_length:.2f} seconds), splitting into segments")
             # Detect non-silent segments
 
@@ -146,7 +146,7 @@ class ASR(Pretrained):
         segments = []
         current_segment = []
         current_length = 0
-        max_duration =
+        max_duration = 20 * sr  # Maximum segment duration in samples (20 seconds)
 
 
         for interval in non_silent_intervals:
@@ -204,7 +204,7 @@ class ASR(Pretrained):
         # Get audio length in seconds
         audio_length = len(waveform) / sr
 
-        if audio_length >=
+        if audio_length >= 20:
             print(f"Audio is too long ({audio_length:.2f} seconds), splitting into segments")
             # Detect non-silent segments
 
@@ -213,7 +213,7 @@ class ASR(Pretrained):
         segments = []
         current_segment = []
         current_length = 0
-        max_duration =
+        max_duration = 20 * sr  # Maximum segment duration in samples (20 seconds)
 
 
         for interval in non_silent_intervals:
@@ -273,7 +273,7 @@ class ASR(Pretrained):
         # Get audio length in seconds
         audio_length = len(waveform) / sr
 
-        if audio_length >=
+        if audio_length >= 20:
             print(f"MMS Audio is too long ({audio_length:.2f} seconds), splitting into segments")
             # Detect non-silent segments
             non_silent_intervals = librosa.effects.split(waveform, top_db=20)  # Adjust top_db for sensitivity
@@ -281,7 +281,7 @@ class ASR(Pretrained):
         segments = []
         current_segment = []
         current_length = 0
-        max_duration =
+        max_duration = 20 * sr  # Maximum segment duration in samples (20 seconds)
 
 
         for interval in non_silent_intervals:
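The segmentation loop itself lies outside the changed hunks, so the sketch below is only a rough standalone illustration of how the new 20-second threshold and the sample-based max_duration presumably feed into the non-silent intervals returned by librosa.effects.split. The function name split_long_audio, the np.concatenate packing, and the loop body are assumptions for illustration, not part of this commit.

import librosa
import numpy as np

def split_long_audio(waveform, sr, max_seconds=20, top_db=20):
    """Hypothetical helper mirroring the splitting logic touched by this commit."""
    audio_length = len(waveform) / sr
    if audio_length < max_seconds:
        return [waveform]  # short enough to transcribe in one pass

    # Detect non-silent intervals as (start, end) sample indices
    non_silent_intervals = librosa.effects.split(waveform, top_db=top_db)

    segments = []
    current_segment = []
    current_length = 0
    max_duration = max_seconds * sr  # maximum segment duration in samples

    for start, end in non_silent_intervals:
        interval_length = end - start
        # Close the current segment once adding this interval would exceed the cap
        if current_segment and current_length + interval_length > max_duration:
            segments.append(np.concatenate(current_segment))
            current_segment = []
            current_length = 0
        current_segment.append(waveform[start:end])
        current_length += interval_length

    if current_segment:
        segments.append(np.concatenate(current_segment))
    return segments

Keeping max_duration in samples (20 * sr) rather than seconds allows a direct comparison against the interval lengths returned by librosa.effects.split, which are also expressed in samples.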