openai-whisper-large-v3-turbo / Process data chunk
import librosa

# Load an audio file and split it into 30-second chunks
def split_audio(audio_file, chunk_size=30):
    audio, sr = librosa.load(audio_file, sr=None)  # keep the file's native sample rate
    chunk_samples = int(chunk_size * sr)
    return [audio[i:i + chunk_samples] for i in range(0, len(audio), chunk_samples)]

# Transcribe each chunk (assumes `whisper_model` is already defined; see the sketch below)
for chunk in split_audio("your_audio_file.wav"):
    transcription = whisper_model(chunk)
    print(transcription)
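
# --- Sketch: one way to define `whisper_model` (an assumption, not part of the original file) ---
# The snippet above never defines `whisper_model`. A minimal option is the
# transformers ASR pipeline with the openai/whisper-large-v3-turbo checkpoint,
# wrapped so the loop can keep calling `whisper_model(chunk)`. The sampling rate
# is passed with each chunk so the pipeline can resample it to the 16 kHz that
# Whisper expects; it must match the rate the audio was loaded at.
from transformers import pipeline

_asr = pipeline("automatic-speech-recognition", model="openai/whisper-large-v3-turbo")

def whisper_model(chunk, sampling_rate=16000):
    # sampling_rate should equal the `sr` returned by librosa.load above
    return _asr({"raw": chunk, "sampling_rate": sampling_rate})["text"]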