"""SemaBox: a small Flask service that transcribes uploaded audio with Whisper."""

from flask import Flask, jsonify, request
import whisper
import os
import tempfile

# Initialize Flask app
app = Flask(__name__)

print("\nHello welcome to SemaBox\n", flush=True)

# Load Whisper model once at startup — loading is expensive, so it must not
# happen per-request.
model = whisper.load_model("small")


def transcribe(audio_path):
    """Transcribe the audio file at *audio_path*.

    Returns a ``(text, detected_language)`` tuple. The audio is padded or
    trimmed to Whisper's fixed 30-second window, so only the first 30 seconds
    are decoded.
    """
    # Load audio and pad/trim it to fit 30 seconds
    audio = whisper.load_audio(audio_path)
    audio = whisper.pad_or_trim(audio)

    # Make log-Mel spectrogram and move to the same device as the model
    mel = whisper.log_mel_spectrogram(audio).to(model.device)

    # Detect the spoken language
    _, probs = model.detect_language(mel)
    detected_language = max(probs, key=probs.get)
    print(f"Detected language: {detected_language}")

    # Decode the audio; fp16=False keeps decoding working on CPU-only hosts.
    options = whisper.DecodingOptions(fp16=False)
    result = whisper.decode(model, mel, options)

    return result.text, detected_language


# Define the route for transcription
@app.route('/transcribe', methods=['POST'])
def transcribe_audio():
    """Accept an uploaded ``audio`` file part and return its transcription.

    Responds 400 when no usable file is attached; otherwise 200 with a JSON
    body containing ``transcription`` and ``language``.
    """
    # Check if an audio file is included in the request
    if 'audio' not in request.files:
        return jsonify({"error": "No audio file provided"}), 400

    audio_file = request.files['audio']
    # A part with an empty filename means no file was actually selected.
    if not audio_file.filename:
        return jsonify({"error": "No audio file provided"}), 400

    # Stage the upload in a unique temp file rather than trusting the
    # client-supplied filename: avoids path traversal ("../../x") and
    # collisions between concurrent requests. Keep the extension in case
    # the loader dispatches on it.
    suffix = os.path.splitext(os.path.basename(audio_file.filename))[1]
    fd, audio_path = tempfile.mkstemp(suffix=suffix, dir="temp_audio")
    os.close(fd)
    try:
        audio_file.save(audio_path)
        # Transcribe the audio
        transcription, language = transcribe(audio_path)
    finally:
        # Clean up the saved file even when transcription raises.
        if os.path.exists(audio_path):
            os.remove(audio_path)

    # Return the transcription and detected language
    return jsonify({"transcription": transcription, "language": language}), 200


# Healthcheck endpoint
@app.route('/healthcheck', methods=['GET'])
def healthcheck():
    """Liveness probe for orchestrators/load balancers."""
    return jsonify({"status": "API is running"}), 200


# Ensure the staging directory exists at import time, not only under
# __main__ — a WSGI server (gunicorn etc.) never runs the __main__ block.
os.makedirs("temp_audio", exist_ok=True)

# Run the Flask app (development server only).
if __name__ == '__main__':
    app.run(host="0.0.0.0", port=5000)