serliaze
client/src/main.py  +3 -1

@@ -2,6 +2,7 @@ from queue import Queue
 import logging
 from datetime import UTC, datetime, timedelta
 from time import sleep
+import pickle
 
 import speech_recognition as sr
 
@@ -46,7 +47,8 @@ def main():
         audio_np_array = to_audio_array(audio_data)
 
         if current_audio_chunk.is_complete:
-
+            serialized = pickle.dumps(current_audio_chunk.audio_array)
+            print('chunk done', serialized)
             # text = transcribe_model.transcribe(current_audio_chunk.audio_array)
             # sentence = Sentence(
             #     start_time=current_audio_chunk.start_time, end_time=current_audio_chunk.end_time, text=text
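
The added lines pickle the in-memory audio array of a completed chunk into a bytes object. As a rough illustration of what that enables, the sketch below round-trips a NumPy array through pickle.dumps and pickle.loads; the float32 dtype and the 16000-sample length are assumptions for the example, not values taken from this commit.

import pickle

import numpy as np

# Stand-in for current_audio_chunk.audio_array; dtype and length are assumed.
audio_array = np.zeros(16000, dtype=np.float32)

serialized = pickle.dumps(audio_array)   # bytes, e.g. suitable for sending elsewhere
restored = pickle.loads(serialized)      # recovers an equal NumPy array

assert np.array_equal(audio_array, restored)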