import io
import threading
from multiprocessing import Queue
from queue import Empty
from faster_whisper import WhisperModel


class AudioTranscriber(threading.Thread):
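    """Background daemon thread: pulls io.BytesIO audio chunks from
    audio_queue, transcribes them with faster-whisper, and puts the
    resulting text on text_queue."""
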
    def __init__(self, audio_queue: "Queue[io.BytesIO]", text_queue: "Queue[str]"):
        super().__init__()
        self.audio_queue = audio_queue
        self.action_queue = text_queue  # Transcribed text is published here
        self.daemon = True  # Thread will exit when main program exits

        # Load the model once up front; int8 quantization keeps GPU memory
        # use down at a small cost in accuracy.
        self.transcriber = WhisperModel(
            "medium",
            device="cuda",
            compute_type="int8",
        )

    def run(self):
        while True:
            try:
                # Wait for 1 second before timing out and checking again
                audio_chunk = self.audio_queue.get(timeout=1)

                # Process the audio chunk using the faster-whisper implementation
                segments, info = self.transcriber.transcribe(audio_chunk, language="fr")

                # Put the transcription results in the output queue
                for segment in segments:
                    self.action_queue.put(segment.text)
                    # Still print for debugging
                    print(
                        f"[{segment.start:.2f}s -> {segment.end:.2f}s] {segment.text}"
                    )

            except Empty:
                continue  # If queue is empty, continue waiting
            except Exception as e:
                print(f"Error processing audio chunk: {e}")