GitLab CI committed on
Commit
c27e5a4
·
1 Parent(s): 2e1a626

Update game build from GitLab CI

Browse files
server/ActionProcessor.py CHANGED
@@ -1,8 +1,10 @@
 
1
  import datetime
2
  import json
3
  from threading import Thread
4
  from multiprocessing import Queue
5
- from typing import Dict, Any, List
 
6
  import logging
7
  import sys
8
  from mistralai import Mistral
@@ -31,14 +33,13 @@ class ActionProcessor(Thread):
31
 
32
  def __init__(
33
  self,
34
- text_queue: "Queue[str]",
35
- action_queue: "Queue[Dict[str, Any]]",
36
  mistral_api_key: str,
37
  ):
38
  super().__init__()
39
- self.text_queue = text_queue
40
  self.action_queue = action_queue
41
- self.text_buffers: List[str] = []
42
  self.mistral_client = Mistral(api_key=mistral_api_key)
43
  self.daemon = True # Thread will exit when main program exits
44
 
@@ -102,53 +103,41 @@ Output: ["None", "neutralSentiment"]
102
 
103
  return result.strip()
104
 
105
- def process_text(self, text: str) -> Dict[str, Any] | None:
106
  """Convert text into an action if a complete command is detected."""
107
  # Get sentiment first
108
- self.text_buffers.append(text)
109
 
110
- if len(self.text_buffers) < 3:
 
111
  return None
112
 
113
- if len(self.text_buffers) > 3:
114
- _ = self.text_buffers.pop(0)
115
 
116
- candidate = self.text_buffers[1]
117
-
118
- if len(self.text_buffers[0]) < len(candidate) >= len(self.text_buffers[2]):
119
- action_and_sentiment = json.loads(self.get_action_and_sentiment(candidate))
120
- if (
121
- not isinstance(action_and_sentiment, list)
122
- or len(action_and_sentiment) != 2
123
- ):
124
- return None
125
-
126
- action, sentiment = action_and_sentiment
127
-
128
- if action not in self.valid_action:
129
- action = "None"
130
- return {
131
- "action": action,
132
- "sentiment": sentiment,
133
- "voice": candidate,
134
- "time": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
135
- }
136
-
137
- return None
138
 
139
  def run(self) -> None:
140
  """Main processing loop."""
141
  while True:
142
  try:
143
  # Get text from queue, blocks until text is available
144
- text = self.text_queue.get()
145
 
146
  # Process the text into an action
 
147
  action = self.process_text(text)
 
 
148
 
149
  # If we got a valid action, add it to the action queue
150
  if action:
151
- self.action_queue.put(action)
152
 
153
  except Exception as e:
154
  logger.error(f"Error processing text: {str(e)}")
 
1
+ from collections import defaultdict
2
  import datetime
3
  import json
4
  from threading import Thread
5
  from multiprocessing import Queue
6
+ import time
7
+ from typing import Dict, Any, List, Tuple
8
  import logging
9
  import sys
10
  from mistralai import Mistral
 
33
 
34
  def __init__(
35
  self,
36
+ text_queue: "Queue[Tuple[str, str]]",
37
+ action_queue: "Queue[Tuple[Dict[str, Any], str]]",
38
  mistral_api_key: str,
39
  ):
40
  super().__init__()
41
+ self.filtered_text_queue = text_queue
42
  self.action_queue = action_queue
 
43
  self.mistral_client = Mistral(api_key=mistral_api_key)
44
  self.daemon = True # Thread will exit when main program exits
45
 
 
103
 
104
  return result.strip()
105
 
106
+ def process_text(self, candidate: str) -> Dict[str, Any] | None:
107
  """Convert text into an action if a complete command is detected."""
108
  # Get sentiment first
 
109
 
110
+ action_and_sentiment = json.loads(self.get_action_and_sentiment(candidate))
111
+ if not isinstance(action_and_sentiment, list) or len(action_and_sentiment) != 2:
112
  return None
113
 
114
+ action, sentiment = action_and_sentiment
 
115
 
116
+ if action not in self.valid_action:
117
+ action = "None"
118
+ return {
119
+ "action": action,
120
+ "sentiment": sentiment,
121
+ "voice": candidate,
122
+ "time": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
123
+ }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
124
 
125
  def run(self) -> None:
126
  """Main processing loop."""
127
  while True:
128
  try:
129
  # Get text from queue, blocks until text is available
130
+ text, session_id = self.filtered_text_queue.get()
131
 
132
  # Process the text into an action
133
+ start_time = time.time()
134
  action = self.process_text(text)
135
+ processing_time = time.time() - start_time
136
+ logger.info(f"{processing_time:.2f}s: {text} -> {action}")
137
 
138
  # If we got a valid action, add it to the action queue
139
  if action:
140
+ self.action_queue.put((action, session_id))
141
 
142
  except Exception as e:
143
  logger.error(f"Error processing text: {str(e)}")
server/AudioTranscriber.py CHANGED
@@ -1,5 +1,5 @@
1
  import io
2
- from typing import List
3
  import threading
4
  from multiprocessing import Queue
5
  from queue import Empty
@@ -22,8 +22,8 @@ logger = logging.getLogger(__name__)
22
  class AudioTranscriber(threading.Thread):
23
  def __init__(
24
  self,
25
- audio_queue: "Queue[io.BytesIO]",
26
- text_queue: "Queue[str]",
27
  language: str = "en",
28
  confidence_threshold: float = 0.5,
29
  ):
@@ -31,10 +31,8 @@ class AudioTranscriber(threading.Thread):
31
  self.audio_queue = audio_queue
32
  self.action_queue = text_queue
33
  self.daemon = True # Thread will exit when main program exits
34
- self.max_buffer_size = 4
35
  self.language = language
36
  self.confidence_threshold = confidence_threshold
37
- self.buffer: List[io.BytesIO] = []
38
  self.transcriber = WhisperModel(
39
  "large",
40
  device="cuda",
@@ -45,32 +43,22 @@ class AudioTranscriber(threading.Thread):
45
  while True:
46
  try:
47
  # Wait for 1 second before timing out and checking again
48
- audio_chunk = self.audio_queue.get(timeout=1)
49
 
50
- self.buffer.append(audio_chunk)
51
-
52
- while len(self.buffer) >= self.max_buffer_size:
53
- _ = self.buffer.pop(0)
54
-
55
- # Create a BytesIO object from the joined buffer
56
- joined_buffer = io.BytesIO(
57
- b"".join([chunk.getvalue() for chunk in self.buffer])
58
- )
59
-
60
- segments, info = self.transcriber.transcribe(
61
- joined_buffer, language=self.language
62
  )
63
 
64
  # Put the transcription results in the output queue
65
  for segment in segments:
66
  if segment.no_speech_prob <= self.confidence_threshold:
67
- self.action_queue.put(segment.text)
68
  # Still print for debugging
69
  logger.info(
70
- f"[{segment.start:.2f}s -> {segment.end:.2f}s] {segment.text}"
71
  )
72
  else:
73
- self.action_queue.put("")
74
 
75
  except Empty:
76
  continue # If queue is empty, continue waiting
 
1
  import io
2
+ from typing import Tuple
3
  import threading
4
  from multiprocessing import Queue
5
  from queue import Empty
 
22
  class AudioTranscriber(threading.Thread):
23
  def __init__(
24
  self,
25
+ audio_queue: "Queue[Tuple[io.BytesIO, str]]",
26
+ text_queue: "Queue[Tuple[str, str]]",
27
  language: str = "en",
28
  confidence_threshold: float = 0.5,
29
  ):
 
31
  self.audio_queue = audio_queue
32
  self.action_queue = text_queue
33
  self.daemon = True # Thread will exit when main program exits
 
34
  self.language = language
35
  self.confidence_threshold = confidence_threshold
 
36
  self.transcriber = WhisperModel(
37
  "large",
38
  device="cuda",
 
43
  while True:
44
  try:
45
  # Wait for 1 second before timing out and checking again
46
+ audio_data, session_id = self.audio_queue.get(timeout=1)
47
 
48
+ segments, _ = self.transcriber.transcribe(
49
+ audio_data, language=self.language
 
 
 
 
 
 
 
 
 
 
50
  )
51
 
52
  # Put the transcription results in the output queue
53
  for segment in segments:
54
  if segment.no_speech_prob <= self.confidence_threshold:
55
+ self.action_queue.put((segment.text, session_id))
56
  # Still print for debugging
57
  logger.info(
58
+ f"[Thread {threading.get_ident()}] [{segment.start:.2f}s -> {segment.end:.2f}s] {segment.text}"
59
  )
60
  else:
61
+ self.action_queue.put(("", session_id))
62
 
63
  except Empty:
64
  continue # If queue is empty, continue waiting
server/TextFilterer.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from threading import Thread
2
+ from multiprocessing import Queue
3
+ from typing import Tuple, Dict, List
4
+ from collections import defaultdict
5
+
6
+ import logging
7
+ import sys
8
+
9
+ # Configure logging
10
+ logging.basicConfig(
11
+ level=logging.INFO,
12
+ format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
13
+ handlers=[logging.StreamHandler(sys.stdout)],
14
+ )
15
+
16
+ logger = logging.getLogger(__name__)
17
+
18
+
19
+ class TextFilterer(Thread):
20
+ def __init__(
21
+ self,
22
+ text_queue: "Queue[Tuple[str, str]]",
23
+ filtered_text_queue: "Queue[Tuple[str, str]]",
24
+ ):
25
+ super().__init__()
26
+ self.text_queue = text_queue
27
+ self.filtered_text_queue = filtered_text_queue
28
+ self.daemon = True # Thread will exit when main program exits
29
+ self.text_buffers: Dict[str, List[str]] = defaultdict(list)
30
+ self.max_buffer_size = 5
31
+
32
+ def filter_text(self, text: str, session_id: str) -> str | None:
33
+ self.text_buffers[session_id].append(text)
34
+
35
+ if len(self.text_buffers[session_id]) < self.max_buffer_size:
36
+ return None
37
+
38
+ while len(self.text_buffers[session_id]) > self.max_buffer_size:
39
+ _ = self.text_buffers[session_id].pop(0)
40
+
41
+ candidate = self.text_buffers[session_id][-2]
42
+ print(f"Candidate: {candidate}")
43
+
44
+ if (
45
+ len(self.text_buffers[session_id][-3])
46
+ < len(candidate)
47
+ >= len(self.text_buffers[session_id][-1])
48
+ ):
49
+ for past in self.text_buffers[session_id][:-2]:
50
+ if candidate == past:
51
+ return None
52
+ return candidate
53
+
54
+ return None
55
+
56
+ def run(self) -> None:
57
+ """Main processing loop."""
58
+ while True:
59
+ try:
60
+ # Get text from queue, blocks until text is available
61
+ text, session_id = self.text_queue.get()
62
+
63
+ # Process the text into an action
64
+ filtered_text = self.filter_text(text, session_id)
65
+
66
+ # If we got a valid action, add it to the action queue
67
+ if filtered_text:
68
+ self.filtered_text_queue.put((filtered_text, session_id))
69
+
70
+ except Exception as e:
71
+ logger.error(f"Error processing text: {str(e)}")
72
+ continue
server/__main__.py CHANGED
@@ -4,14 +4,17 @@ import os
4
  from flask_cors import CORS
5
  from multiprocessing import Queue
6
  import base64
7
- from typing import Any, List, Dict, Tuple
8
  from multiprocessing import Queue
9
  import logging
10
  import sys
 
 
11
 
12
  from server.AudioTranscriber import AudioTranscriber
13
  from server.ActionProcessor import ActionProcessor
14
  from server.StandaloneApplication import StandaloneApplication
 
15
 
16
  # Configure logging
17
  logging.basicConfig(
@@ -25,14 +28,19 @@ logger = logging.getLogger(__name__)
25
 
26
  # Use a directory in the user's home folder for static files
27
  STATIC_DIR = (
28
- "/app/server/static"
29
- if os.getenv("DEBUG") != "true"
30
- else "/home/gab/work/gogogo/html"
31
  )
32
 
33
- audio_queue: "Queue[io.BytesIO]" = Queue()
34
- text_queue: "Queue[str]" = Queue()
35
- action_queue: "Queue[Dict[str, Any]]" = Queue()
 
 
 
 
 
 
 
36
 
37
  app = Flask(__name__, static_folder=STATIC_DIR)
38
 
@@ -103,11 +111,13 @@ def post_order() -> Tuple[Response, int]:
103
 
104
  action_text: str = data["action"]
105
 
 
 
106
  mid_split = len(action_text) // 2
107
  # Add the text to the queue
108
- text_queue.put(action_text[:mid_split])
109
- text_queue.put(action_text)
110
- text_queue.put(action_text[mid_split:])
111
 
112
  return jsonify({"status": "success"}), 200
113
 
@@ -125,6 +135,7 @@ def process_data():
125
  try:
126
  # Check content type
127
  content_type = request.headers.get("Content-Type", "")
 
128
 
129
  # Handle different content types
130
  if "application/json" in content_type:
@@ -158,7 +169,7 @@ def process_data():
158
  )
159
 
160
  # Put the audio chunk in the queue for processing
161
- audio_queue.put(io.BytesIO(audio_chunk))
162
 
163
  return jsonify(
164
  {
@@ -176,15 +187,13 @@ def process_data():
176
 
177
  @app.route("/api/actions", methods=["GET"])
178
  def get_actions() -> Tuple[Response, int]:
179
- """Retrieve and clear all pending actions from the queue"""
180
- actions: List[Dict[str, Any]] = []
181
 
182
- # Drain the queue into our actions list
183
- while not action_queue.empty():
184
- try:
185
- actions.append(action_queue.get_nowait())
186
- except Exception:
187
- break
188
 
189
  return jsonify({"actions": actions, "status": "success"}), 200
190
 
@@ -197,6 +206,31 @@ def serve_static(path: str):
197
  abort(404, description=f"File {path} not found in static folder")
198
 
199
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
200
  if __name__ == "__main__":
201
  if os.path.exists(app.static_folder):
202
  logger.info(f"Static folder contents: {os.listdir(app.static_folder)}")
@@ -204,16 +238,31 @@ if __name__ == "__main__":
204
  os.makedirs(app.static_folder, exist_ok=True)
205
 
206
  # Start the audio transcriber thread
207
- transcriber = AudioTranscriber(audio_queue, text_queue)
208
- transcriber.start()
 
 
 
 
 
 
 
 
209
 
210
  # Start the action processor thread
211
  MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY")
212
  if not MISTRAL_API_KEY:
213
  raise ValueError("MISTRAL_API_KEY is not set")
214
 
215
- action_processor = ActionProcessor(text_queue, action_queue, MISTRAL_API_KEY)
216
- action_processor.start()
 
 
 
 
 
 
 
217
 
218
  options: Any = {
219
  "bind": "0.0.0.0:7860",
 
4
  from flask_cors import CORS
5
  from multiprocessing import Queue
6
  import base64
7
+ from typing import Any, Dict, Tuple
8
  from multiprocessing import Queue
9
  import logging
10
  import sys
11
+ from threading import Lock
12
+ from multiprocessing import Manager
13
 
14
  from server.AudioTranscriber import AudioTranscriber
15
  from server.ActionProcessor import ActionProcessor
16
  from server.StandaloneApplication import StandaloneApplication
17
+ from server.TextFilterer import TextFilterer
18
 
19
  # Configure logging
20
  logging.basicConfig(
 
28
 
29
  # Use a directory in the user's home folder for static files
30
  STATIC_DIR = (
31
+ "/app/server/static" if os.getenv("DEBUG") != "True" else "/mnt/UE/work/godot/html"
 
 
32
  )
33
 
34
+ # Each packet is a tuple of (data, token)
35
+ audio_queue: "Queue[Tuple[io.BytesIO, str]]" = Queue()
36
+ text_queue: "Queue[Tuple[str, str]]" = Queue()
37
+ filtered_text_queue: "Queue[Tuple[str, str]]" = Queue()
38
+ action_queue: "Queue[Tuple[Dict[str, Any], str]]" = Queue()
39
+
40
+ # Thread-safe storage for actions by session
41
+ action_storage_lock = Lock()
42
+ manager = Manager()
43
+ action_storage = manager.dict() # Shared dictionary across processes
44
 
45
  app = Flask(__name__, static_folder=STATIC_DIR)
46
 
 
111
 
112
  action_text: str = data["action"]
113
 
114
+ token: str = request.headers.get("Authorization").split(" ")[1]
115
+
116
  mid_split = len(action_text) // 2
117
  # Add the text to the queue
118
+ text_queue.put((action_text[:mid_split], token))
119
+ text_queue.put((action_text, token))
120
+ text_queue.put((action_text[mid_split:], token))
121
 
122
  return jsonify({"status": "success"}), 200
123
 
 
135
  try:
136
  # Check content type
137
  content_type = request.headers.get("Content-Type", "")
138
+ token: str = request.headers.get("Authorization").split(" ")[1]
139
 
140
  # Handle different content types
141
  if "application/json" in content_type:
 
169
  )
170
 
171
  # Put the audio chunk in the queue for processing
172
+ audio_queue.put((io.BytesIO(audio_chunk), token))
173
 
174
  return jsonify(
175
  {
 
187
 
188
  @app.route("/api/actions", methods=["GET"])
189
  def get_actions() -> Tuple[Response, int]:
190
+ """Retrieve and clear all pending actions for the current session"""
191
+ token: str = request.headers.get("Authorization", "").split(" ")[1]
192
 
193
+ with action_storage_lock:
194
+ # Get and clear actions for this session
195
+ actions = action_storage.get(token, [])
196
+ action_storage[token] = []
 
 
197
 
198
  return jsonify({"actions": actions, "status": "success"}), 200
199
 
 
206
  abort(404, description=f"File {path} not found in static folder")
207
 
208
 
209
class ActionConsumer:
    """Drains the action queue into the per-session shared action store."""

    def __init__(self, action_queue: Queue):
        self.action_queue = action_queue
        self.running = True

    def start(self):
        """Launch the consumer loop on a daemon thread."""
        import threading

        self.thread = threading.Thread(target=self.run, daemon=True)
        self.thread.start()

    def run(self):
        """Pop (action, token) pairs and append each under its token key."""
        while self.running:
            try:
                action, token = self.action_queue.get()
                with action_storage_lock:
                    # Rebuild and reassign the list so the shared dict
                    # registers the update (presumably a manager proxy
                    # that does not observe in-place mutation — confirm).
                    pending = action_storage.get(token, [])
                    pending.append(action)
                    action_storage[token] = pending
            except Exception as e:
                logger.error(f"Error in ActionConsumer: {e}")
232
+
233
+
234
  if __name__ == "__main__":
235
  if os.path.exists(app.static_folder):
236
  logger.info(f"Static folder contents: {os.listdir(app.static_folder)}")
 
238
  os.makedirs(app.static_folder, exist_ok=True)
239
 
240
  # Start the audio transcriber thread
241
+ transcribers = [
242
+ AudioTranscriber(audio_queue, text_queue)
243
+ for _ in range(4 if os.getenv("DEBUG") == "True" else 32)
244
+ ]
245
+ for transcriber in transcribers:
246
+ transcriber.start()
247
+
248
+ # Start the action consumer thread
249
+ action_consumer = ActionConsumer(action_queue)
250
+ action_consumer.start()
251
 
252
  # Start the action processor thread
253
  MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY")
254
  if not MISTRAL_API_KEY:
255
  raise ValueError("MISTRAL_API_KEY is not set")
256
 
257
+ filterer = TextFilterer(text_queue, filtered_text_queue)
258
+ filterer.start()
259
+
260
+ actions_processors = [
261
+ ActionProcessor(filtered_text_queue, action_queue, MISTRAL_API_KEY)
262
+ for _ in range(4 if os.getenv("DEBUG") == "True" else 16)
263
+ ]
264
+ for actions_processor in actions_processors:
265
+ actions_processor.start()
266
 
267
  options: Any = {
268
  "bind": "0.0.0.0:7860",
server/static/godot/index.html CHANGED
@@ -97,7 +97,7 @@ body {
97
 
98
  <script src="index.js"></script>
99
  <script>
100
- const GODOT_CONFIG = {"args":[],"canvasResizePolicy":2,"ensureCrossOriginIsolationHeaders":false,"executable":"index","experimentalVK":false,"fileSizes":{"index.pck":18582528,"index.wasm":35376909},"focusCanvas":true,"gdextensionLibs":[]};
101
  const GODOT_THREADS_ENABLED = false;
102
  const engine = new Engine(GODOT_CONFIG);
103
 
 
97
 
98
  <script src="index.js"></script>
99
  <script>
100
+ const GODOT_CONFIG = {"args":[],"canvasResizePolicy":2,"ensureCrossOriginIsolationHeaders":false,"executable":"index","experimentalVK":false,"fileSizes":{"index.pck":18582560,"index.wasm":35376909},"focusCanvas":true,"gdextensionLibs":[]};
101
  const GODOT_THREADS_ENABLED = false;
102
  const engine = new Engine(GODOT_CONFIG);
103
 
server/static/godot/index.pck CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:aed0401261f4e6874f34383b267790e2465c030c957dc078a8eebb7bdc5a6e3f
3
- size 18582528
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec1cd2b27c61d036b88d4e08696c9f029e01aee83777894e465bc51906e9be54
3
+ size 18582560
server/static/index.html CHANGED
@@ -41,8 +41,6 @@
41
 
42
  // Écouter les messages venant de l'iframe
43
  window.addEventListener('message', function (event) {
44
- // Vérifier que le message a la structure attendue
45
- console.log(event.data)
46
  if (event.data?.type === 'game_token') {
47
  token = event.data.data
48
  console.log("Token reçu :", token)
@@ -70,8 +68,6 @@
70
  const errorMessage = error.name === 'AbortError'
71
  ? 'Server request timed out. Please try again later.'
72
  : 'Could not connect to the server. Please try again later.'
73
- console.error('Server check failed:', error)
74
- console.error(errorMessage)
75
  throw error
76
  })
77
 
@@ -88,21 +84,23 @@
88
  .then(stream => {
89
  const audioContext = new (window.AudioContext || window.webkitAudioContext)()
90
  const mediaRecorder = new MediaRecorder(stream)
91
- const audioChunks = []
 
92
 
93
  mediaRecorder.ondataavailable = event => {
94
- audioChunks.push(event.data)
95
- }
 
 
 
96
 
97
- mediaRecorder.onstop = () => {
98
- const audioBlob = new Blob(audioChunks, { type: 'audio/webm' })
99
- audioChunks.length = 0 // Clear chunks after creating the Blob
100
 
101
- // Convert Blob to base64
102
  const reader = new FileReader()
103
- reader.readAsDataURL(audioBlob)
104
  reader.onloadend = () => {
105
- // Extract the base64 data (remove the data URL prefix)
106
  const base64Audio = reader.result.split(',')[1]
107
 
108
  // Send as JSON with base64-encoded audio
@@ -112,7 +110,8 @@
112
  fetch(serverUrl, {
113
  method: 'POST',
114
  headers: {
115
- 'Content-Type': 'application/json'
 
116
  },
117
  body: JSON.stringify({
118
  audio_chunk: base64Audio
@@ -123,20 +122,16 @@
123
  console.log('Audio chunk sent successfully')
124
  }).catch(error => {
125
  clearTimeout(audioTimeout)
126
- console.error('Error sending audio chunk:', error)
127
  })
128
  }
129
  }
130
 
131
  // Start recording in intervals
132
- const chunkInterval = 700 // Chunk duration in milliseconds
 
133
  setInterval(() => {
134
- if (mediaRecorder.state === 'recording') {
135
- mediaRecorder.stop()
136
- mediaRecorder.start()
137
- } else {
138
- mediaRecorder.start()
139
- }
140
  }, chunkInterval)
141
  })
142
  .catch(error => {
 
41
 
42
  // Écouter les messages venant de l'iframe
43
  window.addEventListener('message', function (event) {
 
 
44
  if (event.data?.type === 'game_token') {
45
  token = event.data.data
46
  console.log("Token reçu :", token)
 
68
  const errorMessage = error.name === 'AbortError'
69
  ? 'Server request timed out. Please try again later.'
70
  : 'Could not connect to the server. Please try again later.'
 
 
71
  throw error
72
  })
73
 
 
84
  .then(stream => {
85
  const audioContext = new (window.AudioContext || window.webkitAudioContext)()
86
  const mediaRecorder = new MediaRecorder(stream)
87
+ const audioBuffer = [] // Buffer to store last 5 recordings
88
+ const MAX_BUFFER_SIZE = 12
89
 
90
  mediaRecorder.ondataavailable = event => {
91
+ // Add new chunk to buffer and maintain max size
92
+ audioBuffer.push(event.data)
93
+ if (audioBuffer.length > MAX_BUFFER_SIZE) {
94
+ audioBuffer.shift() // Remove oldest chunk
95
+ }
96
 
97
+ // Merge all blobs in buffer
98
+ const mergedBlob = new Blob(audioBuffer, { type: 'audio/webm' })
 
99
 
100
+ // Convert merged Blob to base64
101
  const reader = new FileReader()
102
+ reader.readAsDataURL(mergedBlob)
103
  reader.onloadend = () => {
 
104
  const base64Audio = reader.result.split(',')[1]
105
 
106
  // Send as JSON with base64-encoded audio
 
110
  fetch(serverUrl, {
111
  method: 'POST',
112
  headers: {
113
+ 'Content-Type': 'application/json',
114
+ 'Authorization': `Bearer ${token}`
115
  },
116
  body: JSON.stringify({
117
  audio_chunk: base64Audio
 
122
  console.log('Audio chunk sent successfully')
123
  }).catch(error => {
124
  clearTimeout(audioTimeout)
 
125
  })
126
  }
127
  }
128
 
129
  // Start recording in intervals
130
+ const chunkInterval = 300 // Chunk duration in milliseconds
131
+ mediaRecorder.start()
132
  setInterval(() => {
133
+ mediaRecorder.stop()
134
+ mediaRecorder.start()
 
 
 
 
135
  }, chunkInterval)
136
  })
137
  .catch(error => {