Pijush2023 committed on
Commit
b79ab7b
·
verified ·
1 Parent(s): 3717130

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -12
app.py CHANGED
@@ -173,7 +173,7 @@ def fetch_local_weather():
173
  }}
174
  .weather-content {{
175
  display: flex;
176
- align-items: center;
177
  }}
178
  .weather-icon {{
179
  flex: 1;
@@ -284,16 +284,14 @@ def bot(history, choice, tts_model):
284
 
285
  # Generate audio and process output prompt in parallel
286
  with concurrent.futures.ThreadPoolExecutor() as executor:
287
- audio_future = executor.submit(generate_audio_chunks, tts_model, response)
288
  text_chunks = [response[i:i + 100] for i in range(0, len(response), 100)] # Chunk the text for streaming
289
- for text_chunk in text_chunks:
 
 
290
  history[-1][1] += text_chunk
291
- audio_chunk = next(audio_future.result(), None)
292
- time.sleep(0.2) # Adjust this to synchronize text and audio appearance
293
  yield history, audio_chunk
294
-
295
- for remaining_audio_chunk in audio_future.result():
296
- yield history, remaining_audio_chunk
297
 
298
  def generate_audio_chunks(tts_model, text):
299
  if tts_model == "ElevenLabs":
@@ -320,12 +318,16 @@ def generate_audio_elevenlabs_chunks(text):
320
  }
321
  }
322
  response = requests.post(tts_url, headers=headers, json=data, stream=True)
 
323
  if response.ok:
324
- for chunk in response.iter_content(chunk_size=1024):
325
- yield chunk
 
 
 
326
  else:
327
  logging.error(f"Error generating audio: {response.text}")
328
- return None
329
 
330
  def generate_audio_parler_tts_chunks(text):
331
  model_id = 'parler-tts/parler_tts_mini_v0.1'
@@ -347,11 +349,13 @@ def generate_audio_parler_tts_chunks(text):
347
  audio_arr = generation.cpu().numpy().squeeze()
348
 
349
  chunk_size = 16000 # Define a chunk size (adjust as needed)
 
350
  for i in range(0, len(audio_arr), chunk_size):
351
  audio_chunk = audio_arr[i:i + chunk_size]
352
  with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as f:
353
  sf.write(f.name, audio_chunk, model.config.sampling_rate)
354
- yield f.name
 
355
 
356
  def add_message(history, message):
357
  history.append((message, None))
 
173
  }}
174
  .weather-content {{
175
  display: flex;
176
+ align-items: center;
177
  }}
178
  .weather-icon {{
179
  flex: 1;
 
284
 
285
  # Generate audio and process output prompt in parallel
286
  with concurrent.futures.ThreadPoolExecutor() as executor:
287
+ audio_chunks_future = executor.submit(generate_audio_chunks, tts_model, response)
288
  text_chunks = [response[i:i + 100] for i in range(0, len(response), 100)] # Chunk the text for streaming
289
+
290
+ audio_chunks = audio_chunks_future.result()
291
+ for text_chunk, audio_chunk in zip(text_chunks, audio_chunks):
292
  history[-1][1] += text_chunk
 
 
293
  yield history, audio_chunk
294
+ time.sleep(0.2) # Adjust this to synchronize text and audio appearance
 
 
295
 
296
  def generate_audio_chunks(tts_model, text):
297
  if tts_model == "ElevenLabs":
 
318
  }
319
  }
320
  response = requests.post(tts_url, headers=headers, json=data, stream=True)
321
+ audio_chunks = []
322
  if response.ok:
323
+ with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as f:
324
+ for chunk in response.iter_content(chunk_size=1024):
325
+ f.write(chunk)
326
+ audio_chunks.append(f.name)
327
+ return audio_chunks
328
  else:
329
  logging.error(f"Error generating audio: {response.text}")
330
+ return []
331
 
332
  def generate_audio_parler_tts_chunks(text):
333
  model_id = 'parler-tts/parler_tts_mini_v0.1'
 
349
  audio_arr = generation.cpu().numpy().squeeze()
350
 
351
  chunk_size = 16000 # Define a chunk size (adjust as needed)
352
+ audio_chunks = []
353
  for i in range(0, len(audio_arr), chunk_size):
354
  audio_chunk = audio_arr[i:i + chunk_size]
355
  with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as f:
356
  sf.write(f.name, audio_chunk, model.config.sampling_rate)
357
+ audio_chunks.append(f.name)
358
+ return audio_chunks
359
 
360
  def add_message(history, message):
361
  history.append((message, None))