File size: 1,950 Bytes
15dce1a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 |
from flask import Flask, request, jsonify
import whisper
import os
# Initialize Flask app
app = Flask(__name__)
# Startup banner; flush=True so it appears immediately even under buffered stdout.
print("\nHello welcome to SemaBox\n", flush=True)
# Load Whisper model
# NOTE(review): loads the "small" checkpoint once at import time and shares it
# across all requests — downloads the weights on first run; TODO confirm the
# host has enough memory/disk for this at container start.
model = whisper.load_model("small")
def transcribe(audio_path):
    """Transcribe one audio file with the module-level Whisper model.

    Args:
        audio_path: Filesystem path to an audio file ffmpeg can read.

    Returns:
        A tuple ``(text, detected_language)`` where ``text`` is the decoded
        transcription and ``detected_language`` is the most probable
        language code reported by the model.
    """
    # Whisper decodes fixed 30-second windows: pad or trim the waveform to fit.
    waveform = whisper.pad_or_trim(whisper.load_audio(audio_path))
    # Build the log-Mel spectrogram on the same device the model lives on.
    mel = whisper.log_mel_spectrogram(waveform).to(model.device)
    # Language detection returns per-language probabilities; keep the argmax.
    _, probs = model.detect_language(mel)
    detected_language = max(probs, key=probs.get)
    print(f"Detected language: {detected_language}")
    # fp16=False forces float32 decoding, which also works on CPU-only hosts.
    decoded = whisper.decode(model, mel, whisper.DecodingOptions(fp16=False))
    return decoded.text, detected_language
# Define the route for transcription
@app.route('/transcribe', methods=['POST'])
def transcribe_audio():
    """Handle POST /transcribe: accept an uploaded 'audio' file part,
    transcribe it with Whisper, and return JSON.

    Returns:
        200 with {"transcription": ..., "language": ...} on success,
        400 with {"error": ...} when no usable audio file was uploaded.
    """
    # Check if an audio file is included in the request
    if 'audio' not in request.files:
        return jsonify({"error": "No audio file provided"}), 400
    audio_file = request.files['audio']
    # Browsers may submit a file part with an empty filename — treat as missing.
    if not audio_file.filename:
        return jsonify({"error": "No audio file provided"}), 400
    # SECURITY: the client controls the filename; keep only the basename so a
    # crafted name like "../../etc/cron.d/x" cannot escape temp_audio
    # (path traversal).
    safe_name = os.path.basename(audio_file.filename)
    # Ensure the scratch directory exists even when the app runs under a WSGI
    # server, where the __main__ guard (which created it before) never runs.
    os.makedirs("temp_audio", exist_ok=True)
    # Save the uploaded audio file
    audio_path = os.path.join("temp_audio", safe_name)
    audio_file.save(audio_path)
    try:
        # Transcribe the audio
        transcription, language = transcribe(audio_path)
    finally:
        # Clean up the saved file even if transcription raises, so failed
        # requests do not leak temp files.
        os.remove(audio_path)
    # Return the transcription and detected language
    return jsonify({"transcription": transcription, "language": language}), 200
# Healthcheck endpoint
@app.route('/healthcheck', methods=['GET'])
def healthcheck():
    """Liveness probe: confirm the API process is serving requests."""
    payload = {"status": "API is running"}
    return jsonify(payload), 200
# Run the Flask app
if __name__ == '__main__':
    # Ensure the temp_audio directory exists (no-op when already present).
    os.makedirs("temp_audio", exist_ok=True)
    # Bind on all interfaces so the service is reachable from outside the host/container.
    app.run(host="0.0.0.0", port=5000)
|