import io
import threading
from multiprocessing import Queue
from queue import Empty

from faster_whisper import WhisperModel


class AudioTranscriber(threading.Thread):
    def __init__(self, audio_queue: "Queue[io.BytesIO]", text_queue: "Queue[str]"):
        super().__init__()
        self.audio_queue = audio_queue
        self.action_queue = text_queue
        self.daemon = True  # Thread will exit when the main program exits
        self.transcriber = WhisperModel(
            "medium",
            device="cuda",
            compute_type="int8",
        )

    def run(self):
        while True:
            try:
                # Wait up to 1 second before timing out and checking again
                audio_chunk = self.audio_queue.get(timeout=1)

                # Process the audio chunk using the faster-whisper implementation
                segments, info = self.transcriber.transcribe(audio_chunk, language="fr")

                # Put the transcription results in the output queue
                for segment in segments:
                    self.action_queue.put(segment.text)
                    # Still print for debugging
                    print(
                        "[%.2fs -> %.2fs] %s"
                        % (segment.start, segment.end, segment.text)
                    )
            except Empty:
                continue  # If the queue is empty, continue waiting
            except Exception as e:
                print(f"Error processing audio chunk: {e}")
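
# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal example of wiring AudioTranscriber into a producer/consumer
# pipeline: create the two queues, start the daemon thread, feed it one
# WAV-encoded io.BytesIO chunk, and drain the transcribed text. The file
# path "sample.wav", the single-chunk producer, and the 10-second drain
# timeout are assumptions for illustration; a real producer would push
# chunks captured from a microphone onto audio_queue instead.
if __name__ == "__main__":
    audio_queue: "Queue[io.BytesIO]" = Queue()
    text_queue: "Queue[str]" = Queue()

    transcriber_thread = AudioTranscriber(audio_queue, text_queue)
    transcriber_thread.start()

    # Hypothetical producer: enqueue a pre-recorded WAV file as one chunk.
    with open("sample.wav", "rb") as f:  # assumed path, for illustration only
        audio_queue.put(io.BytesIO(f.read()))

    # Consumer: print transcribed segments until the queue stays empty
    # for 10 seconds, then exit (the daemon thread dies with the process).
    while True:
        try:
            print("Transcribed:", text_queue.get(timeout=10))
        except Empty:
            break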