Pijush2023 committed on
Commit
4b344c0
·
verified ·
1 Parent(s): 20478b4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -85
app.py CHANGED
@@ -351,23 +351,52 @@ def generate_answer(message, choice):
351
  addresses = extract_addresses(response['output'])
352
  return response['output'], addresses
353
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
354
  def bot(history, choice, tts_choice):
355
  if not history:
356
  return history
357
- response, addresses = generate_answer(history[-1][0], choice)
358
- history[-1][1] = ""
359
 
 
 
 
 
 
 
 
 
 
360
  with concurrent.futures.ThreadPoolExecutor() as executor:
361
  if tts_choice == "Alpha":
362
- audio_future = executor.submit(generate_audio_elevenlabs, response)
363
  elif tts_choice == "Beta":
364
- audio_future = executor.submit(generate_audio_parler_tts, response)
365
  elif tts_choice == "Gamma":
366
- audio_future = executor.submit(generate_audio_mars5, response)
367
- elif tts_choice == "Delta":
368
- audio_future = executor.submit(generate_audio_fishaudio, response)
369
 
370
- for character in response:
371
  history[-1][1] += character
372
  time.sleep(0.05)
373
  yield history, None
@@ -375,85 +404,15 @@ def bot(history, choice, tts_choice):
375
  audio_path = audio_future.result()
376
  yield history, audio_path
377
 
 
 
 
 
378
  def add_message(history, message):
379
- history.append((message, None))
 
380
  return history, gr.Textbox(value="", interactive=True, placeholder="Enter message or upload file...", show_label=False)
381
- #----------------------------------part 1-------------------------
382
- # def generate_answer(message, choice):
383
- # logging.debug(f"generate_answer called with prompt_choice: {choice}")
384
-
385
- # if choice == "Details":
386
- # agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_1)
387
- # elif choice == "Conversational":
388
- # agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
389
- # else:
390
- # logging.error(f"Invalid prompt_choice: {choice}. Defaulting to 'Conversational'")
391
- # agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
392
- # response = agent(message)
393
 
394
- # addresses = extract_addresses(response['output'])
395
- # return response['output'], addresses
396
-
397
-
398
-
399
- # # def bot(history, choice, tts_choice):
400
- # # if not history:
401
- # # return history
402
- # # response_pair, addresses = generate_answer(history[-1][0], choice)
403
- # # history[-1] = response_pair # Update bot response correctly
404
-
405
- # # with concurrent.futures.ThreadPoolExecutor() as executor:
406
- # # if tts_choice == "Alpha":
407
- # # audio_future = executor.submit(generate_audio_elevenlabs, response_pair[1])
408
- # # elif tts_choice == "Beta":
409
- # # audio_future = executor.submit(generate_audio_parler_tts, response_pair[1])
410
- # # elif tts_choice == "Gamma":
411
- # # audio_future = executor.submit(generate_audio_mars5, response_pair[1])
412
-
413
- # # for character in response_pair[1]:
414
- # # history[-1][1] += character
415
- # # time.sleep(0.05)
416
- # # yield history, None
417
-
418
- # # audio_path = audio_future.result()
419
- # # yield history, audio_path
420
-
421
- # # def add_message(history, message):
422
- # # history = history or []
423
- # # history.append([message, ""]) # Ensure it is a list with two elements: message and empty response
424
- # # return history, gr.Textbox(value="", interactive=True, placeholder="Enter message or upload file...", show_label=False)
425
-
426
- # def bot(history, choice, tts_choice):
427
- # if not history:
428
- # return history
429
- # response_pair, addresses = generate_answer(history[-1][0], choice)
430
- # # Ensure history has the correct format
431
- # if len(history[-1]) == 1:
432
- # history[-1].append("")
433
- # elif len(history[-1]) == 0:
434
- # history[-1] = [history[-1][0], ""]
435
-
436
- # with concurrent.futures.ThreadPoolExecutor() as executor:
437
- # if tts_choice == "Alpha":
438
- # audio_future = executor.submit(generate_audio_elevenlabs, response_pair[1])
439
- # elif tts_choice == "Beta":
440
- # audio_future = executor.submit(generate_audio_parler_tts, response_pair[1])
441
- # elif tts_choice == "Gamma":
442
- # audio_future = executor.submit(generate_audio_mars5, response_pair[1])
443
-
444
- # for character in response_pair[1]:
445
- # history[-1][1] += character
446
- # time.sleep(0.05)
447
- # yield history, None
448
-
449
- # audio_path = audio_future.result()
450
- # yield history, audio_path
451
-
452
- # def add_message(history, message):
453
- # history = history or []
454
- # history.append([message, ""]) # Ensure it is a list with two elements: message and empty response
455
- # return history, gr.Textbox(value="", interactive=True, placeholder="Enter message or upload file...", show_label=False)
456
- #--------------------------------------------------------
457
  # def generate_voice_response(history, tts_choice):
458
  # if not history:
459
  # return None
@@ -661,7 +620,7 @@ def transcribe_function(stream, new_chunk):
661
 
662
  result = pipe_asr({"array": stream, "sampling_rate": sr}, return_timestamps=False)
663
 
664
- full_text = result.get("text")
665
 
666
  return stream, full_text, result
667
 
@@ -899,3 +858,4 @@ demo.launch(share=True)
899
 
900
 
901
 
 
 
351
  addresses = extract_addresses(response['output'])
352
  return response['output'], addresses
353
 
354
+ # def bot(history, choice, tts_choice):
355
+ # if not history:
356
+ # return history
357
+ # response, addresses = generate_answer(history[-1][0], choice)
358
+ # history[-1][1] = ""
359
+
360
+ # with concurrent.futures.ThreadPoolExecutor() as executor:
361
+ # if tts_choice == "Alpha":
362
+ # audio_future = executor.submit(generate_audio_elevenlabs, response)
363
+ # elif tts_choice == "Beta":
364
+ # audio_future = executor.submit(generate_audio_parler_tts, response)
365
+ # elif tts_choice == "Gamma":
366
+ # audio_future = executor.submit(generate_audio_mars5, response)
367
+ # elif tts_choice == "Delta":
368
+ # audio_future = executor.submit(generate_audio_fishaudio, response)
369
+
370
+ # for character in response:
371
+ # history[-1][1] += character
372
+ # time.sleep(0.05)
373
+ # yield history, None
374
+
375
+ # audio_path = audio_future.result()
376
+ # yield history, audio_path
377
+
378
def bot(history, choice, tts_choice):
    """Generate the bot's reply for the latest user turn and stream it out.

    Args:
        history: Chat history; the last entry holds the newest user message
            (either ``[message]`` or ``[message, reply]``).
        choice: Prompt-style selector forwarded to ``generate_answer``
            ("Details" / "Conversational").
        tts_choice: Which TTS backend to use ("Alpha" = ElevenLabs,
            "Beta" = Parler, "Gamma" = MARS5).

    Yields:
        ``(history, audio)`` pairs: one per streamed character with
        ``audio=None``, then a final pair carrying the synthesized audio path.
    """
    if not history:
        return

    user_message = history[-1][0]
    response_text, addresses = generate_answer(user_message, choice)

    # Start the visible reply EMPTY.  Pre-filling it with response_text and
    # then appending each character in the loop below would display the
    # answer twice; the streaming loop is the single writer of this slot.
    if len(history[-1]) == 1:
        history[-1].append("")
    else:
        history[-1][1] = ""

    with concurrent.futures.ThreadPoolExecutor() as executor:
        # Kick off TTS synthesis in the background while text streams.
        if tts_choice == "Alpha":
            audio_future = executor.submit(generate_audio_elevenlabs, response_text)
        elif tts_choice == "Beta":
            audio_future = executor.submit(generate_audio_parler_tts, response_text)
        elif tts_choice == "Gamma":
            audio_future = executor.submit(generate_audio_mars5, response_text)
        else:
            # Unknown selector (e.g. the removed "Delta" option): skip audio
            # instead of hitting an unbound-name error at .result() below.
            audio_future = None

        # Typewriter effect: reveal one character per yield.
        for character in response_text:
            history[-1][1] += character
            time.sleep(0.05)
            yield history, None

        audio_path = audio_future.result() if audio_future is not None else None
        yield history, audio_path
 
407
+ # def add_message(history, message):
408
+ # history.append((message, None))
409
+ # return history, gr.Textbox(value="", interactive=True, placeholder="Enter message or upload file...", show_label=False)
410
+
411
def add_message(history, message):
    """Record a new user turn in the chat history and reset the input box.

    Args:
        history: Existing chat history (may be ``None`` or empty).
        message: The user's new message text.

    Returns:
        ``(history, textbox)`` — the history with ``[message, ""]`` appended
        (empty reply slot to be filled by ``bot``), and a cleared, interactive
        ``gr.Textbox`` for the next input.
    """
    if not history:
        history = []
    # Use a two-element list so the bot can mutate the reply slot in place.
    history.append([message, ""])
    cleared_input = gr.Textbox(
        value="",
        interactive=True,
        placeholder="Enter message or upload file...",
        show_label=False,
    )
    return history, cleared_input
 
 
 
 
 
 
 
 
 
 
 
 
415
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
416
  # def generate_voice_response(history, tts_choice):
417
  # if not history:
418
  # return None
 
620
 
621
  result = pipe_asr({"array": stream, "sampling_rate": sr}, return_timestamps=False)
622
 
623
+ full_text = result.get("text","")
624
 
625
  return stream, full_text, result
626
 
 
858
 
859
 
860
 
861
+