ariankhalfani committed (verified)
Commit a5f7b84 · 1 Parent(s): 0b90662

Update LLMwithvoice.py

Files changed (1):
  1. LLMwithvoice.py +20 -14
LLMwithvoice.py CHANGED
@@ -1,6 +1,7 @@
 import requests
 import torch
-import soundfile as sf
+import numpy as np
+import sounddevice as sd
 from transformers import AutoTokenizer
 from parler_tts import ParlerTTSForConditionalGeneration
 
@@ -29,9 +30,16 @@ def query_roberta(api_token, prompt, context):
     headers = {"Authorization": f"Bearer {api_token}"}
     response = requests.post(API_URL_ROBERTA, headers=headers, json=payload)
     try:
-        return response.json()
-    except ValueError:
-        return {"error": "Invalid JSON response"}
+        response_json = response.json()
+        if 'error' in response_json:
+            raise ValueError(response_json['error'])
+        return response_json
+    except ValueError as e:
+        print(f"ValueError: {e}")
+        return {"error": str(e)}
+    except Exception as e:
+        print(f"Exception: {e}")
+        return {"error": "An unexpected error occurred"}
 
 def generate_speech(answer):
     input_ids = tokenizer(answer, return_tensors="pt").input_ids.to(device)
@@ -39,19 +47,17 @@ def generate_speech(answer):
     generation = model.generate(input_ids=input_ids).to(torch.float32)
     audio_arr = generation.cpu().numpy().squeeze()
 
-    # Construct the audio path dynamically
-    audio_filename = "parler_tts_out.wav"
-    audio_path = f"/mnt/data/{audio_filename}"
-
+    # Play the generated audio
     try:
-        sf.write(audio_path, audio_arr, model.config.sampling_rate)
+        sd.play(audio_arr, samplerate=model.config.sampling_rate)
+        sd.wait()  # Wait until the audio is done playing
     except Exception as e:
-        print(f"Error writing audio file: {e}")
+        print(f"Error playing audio: {e}")
         # Handle the error, raise or log it, or provide an alternative approach
 
-    return audio_path
-
 def gradio_interface(api_token, prompt, context):
     answer = query_roberta(api_token, prompt, context)
-    audio_path = generate_speech(answer)
-    return answer, audio_path
+    if 'error' in answer:
+        return answer['error'], None
+    generate_speech(answer.get('answer', ''))
+    return answer.get('answer', 'No answer found'), None
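The hunks above touch only three functions; the module-level objects they rely on (API_URL_ROBERTA, device, model, tokenizer) and the Gradio wiring live elsewhere in LLMwithvoice.py and are not shown in this diff. Purely as an illustration of how the updated gradio_interface could be exposed, here is a minimal sketch that assumes it is appended to the same file after those definitions; the input/output layout below is an assumption, not part of this commit.

# Wiring sketch (assumption, not part of this commit). It presumes it sits at
# the bottom of LLMwithvoice.py, after gradio_interface and the module-level
# model/tokenizer/device objects have already been defined.
import gradio as gr

demo = gr.Interface(
    fn=gradio_interface,  # as updated above: returns (answer_text, None)
    inputs=[
        gr.Textbox(label="HF API token", type="password"),
        gr.Textbox(label="Question"),
        gr.Textbox(label="Context", lines=5),
    ],
    # The second output stays None now that audio is played locally via
    # sounddevice instead of being written to a WAV file and returned as a path.
    outputs=[gr.Textbox(label="Answer"), gr.Audio(label="Audio (unused)")],
)

if __name__ == "__main__":
    demo.launch()

Note that sd.play()/sd.wait() play audio on the machine running the script, not in the visitor's browser; returning the waveform to a gr.Audio output (for example as a (sampling_rate, audio_arr) tuple) would be needed for client-side playback.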