import spaces
import torch
import gradio as gr
from transformers import pipeline
from transformers.pipelines.audio_utils import ffmpeg_read
import tempfile
import os
import json

#===============
# Define main parameters
#===============
MODEL_NAME = "openai/whisper-large-v3-turbo"
BATCH_SIZE = 8
FILE_LIMIT_MB = 1000
YT_LENGTH_LIMIT_S = 3600  # limit to 1 hour YouTube files

# Use the first CUDA device when available, otherwise fall back to CPU.
device = 0 if torch.cuda.is_available() else "cpu"

pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)

#===============
# Main functions
#===============
@spaces.GPU
def transcribe(audio_file, task, language, keywords):
    """Transcribe (or translate) an audio file with Whisper.

    Parameters
    ----------
    audio_file : str | None
        Filesystem path to the uploaded/recorded audio (Gradio ``filepath``).
    task : str
        Either ``"transcribe"`` or ``"translate"``.
    language : str
        Spoken-language hint forwarded to the model (e.g. ``"spanish"``).
    keywords : str
        Free-form text from the UI. NOTE(review): currently unused by this
        function — kept so the Gradio interface wiring stays unchanged;
        confirm whether keyword biasing was intended.

    Returns
    -------
    tuple[str, str, str]
        ``(preview_text, json_file_path, txt_file_path)`` — a 200-character
        preview, a JSON file with text + timestamps, and a plain-text file.

    Raises
    ------
    gr.Error
        If no audio file was submitted.
    """
    if audio_file is None:
        raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")

    result = pipe(
        audio_file,
        batch_size=BATCH_SIZE,
        generate_kwargs={"task": task, "language": language},
        return_timestamps=True,
    )
    text = result["text"]
    timestamps = result["chunks"]

    # First 200 characters for display
    preview_text = text[:200] + "..." if len(text) > 200 else text

    # Full transcription with timestamps in JSON
    full_transcription = {
        "text": text,
        "timestamps": timestamps
    }

    # Save the full transcription (with timestamps) as JSON.
    # delete=False keeps the file on disk so Gradio can serve it for download;
    # the context manager closes the handle right away (the original leaked
    # two open file objects and relied on the platform default encoding,
    # which breaks on non-UTF-8 locales). ensure_ascii=False keeps accented
    # (e.g. Spanish) characters readable in the downloaded JSON.
    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".json", delete=False, encoding="utf-8"
    ) as json_file:
        json.dump(full_transcription, json_file, ensure_ascii=False)
        json_file_path = json_file.name

    # Save the plain text transcription as TXT
    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".txt", delete=False, encoding="utf-8"
    ) as txt_file:
        txt_file.write(text)
        txt_file_path = txt_file.name

    return preview_text, json_file_path, txt_file_path

#===============
# Build the frontend
#===============
file_transcribe = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(sources="upload", type="filepath", label="Audio file"),
        gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
        gr.Dropdown(["spanish", "english"], label="Language", info="Will add more later!", value="spanish"),
        gr.Textbox(lines=10, label="Keywords"),
    ],
    outputs=[
        gr.Textbox(label="Preview (first 200 characters)"),
        gr.File(label="Download full transcription as JSON"),
        gr.File(label="Download transcription as TXT")
    ],
    title="Whisper Large V3: Transcribe Audio",
    description=(
        "Transcribe long-form audio inputs with the click of a button! Demo uses the"
        f" checkpoint [{MODEL_NAME}](https://huggingface.co./{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
        " of arbitrary length."
    ),
    allow_flagging="never",
)

#===============
# Launch
#===============
demo = gr.Blocks(theme=gr.themes.Ocean())

with demo:
    gr.TabbedInterface([file_transcribe], ["Audio file"])

demo.queue().launch(ssr_mode=False)