Spaces: Runtime error
Update LLMwithvoice.py
LLMwithvoice.py +8 -22
LLMwithvoice.py CHANGED
@@ -4,7 +4,7 @@ import numpy as np
 from transformers import AutoTokenizer, AutoModelForQuestionAnswering
 from parler_tts import ParlerTTSForConditionalGeneration
 from pydub import AudioSegment
-import
+from IPython.display import Audio, display

 # Hugging Face API URL for Roberta model
 API_URL_ROBERTA = "https://api-inference.huggingface.co/models/deepset/roberta-base-squad2"
@@ -54,17 +54,12 @@ def generate_speech(answer):
         channels=1
     )

-    #
-
-
-
-
-
-        sample_rate=audio_segment.frame_rate
-    )
-    play_obj.wait_done()  # Wait until the audio is done playing
-    except Exception as e:
-        print(f"Error playing audio: {e}")
+    # Save the audio to a file
+    audio_file = "output.wav"
+    audio_segment.export(audio_file, format="wav")
+
+    # Play the audio using IPython.display.Audio
+    display(Audio(audio_file))

 # Function to interface with Gradio
 def gradio_interface(api_token, prompt, context):
@@ -72,13 +67,4 @@ def gradio_interface(api_token, prompt, context):
     if 'error' in answer:
         return answer['error'], None
     generate_speech(answer.get('answer', ''))
-    return answer.get('answer', 'No answer found'), None
-
-# Example usage
-if __name__ == "__main__":
-    api_token = "your_huggingface_api_token"
-    prompt = "What is the capital of France?"
-    context = "France, in Western Europe, encompasses medieval cities, alpine villages, and Mediterranean beaches. Paris, its capital, is famed for its fashion houses, classical art museums including the Louvre, and monuments like the Eiffel Tower."
-
-    answer, _ = gradio_interface(api_token, prompt, context)
-    print("Answer:", answer)
+    return answer.get('answer', 'No answer found'), None
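For reference, the added lines swap the old play_obj-based playback for a save-then-display flow: the pydub AudioSegment is exported to output.wav and rendered inline with IPython.display.Audio. A minimal sketch of that flow, assuming the audio samples come back from Parler-TTS as a float array (the helper name and the PCM conversion details are illustrative, not code from the commit):

import numpy as np
from pydub import AudioSegment
from IPython.display import Audio, display

def play_generated_audio(samples: np.ndarray, sampling_rate: int) -> str:
    # Illustrative helper, not the repo's code: wrap raw TTS samples in a
    # pydub AudioSegment, save them to a WAV file, and show an inline player.
    pcm = (np.clip(samples, -1.0, 1.0) * 32767).astype(np.int16)  # float [-1, 1] -> 16-bit PCM
    audio_segment = AudioSegment(
        data=pcm.tobytes(),
        sample_width=2,   # bytes per sample (16-bit)
        frame_rate=sampling_rate,
        channels=1
    )

    # Save the audio to a file, as the updated generate_speech does
    audio_file = "output.wav"
    audio_segment.export(audio_file, format="wav")

    # Play the audio using IPython.display.Audio
    display(Audio(audio_file))
    return audio_file

The inline player only renders in a notebook-style frontend; in a plain script the export step still produces output.wav.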
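The unchanged context lines point the question-answering step at the hosted deepset/roberta-base-squad2 model via API_URL_ROBERTA. A sketch of what such a request to the Hugging Face Inference API typically looks like (the helper name query_roberta is an assumption; the script's own request code is not shown in this diff):

import requests

API_URL_ROBERTA = "https://api-inference.huggingface.co/models/deepset/roberta-base-squad2"

def query_roberta(api_token: str, question: str, context: str) -> dict:
    # Illustrative request only: send the question/context pair to the
    # hosted extractive-QA model and return the parsed JSON response.
    headers = {"Authorization": f"Bearer {api_token}"}
    payload = {"inputs": {"question": question, "context": context}}
    response = requests.post(API_URL_ROBERTA, headers=headers, json=payload)
    return response.json()

A successful response carries an 'answer' field and a failed one an 'error' field, which matches the "if 'error' in answer" check kept in gradio_interface.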
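Since gradio_interface(api_token, prompt, context) returns the answer text plus a placeholder None for audio, it fits a three-input, two-output Gradio Interface. The wiring below is only an illustrative sketch; the commit does not include the actual Interface definition, and the component labels are invented:

import gradio as gr

from LLMwithvoice import gradio_interface  # assumes LLMwithvoice.py is importable as a module

def build_demo() -> gr.Interface:
    # Hypothetical wiring for the gradio_interface function defined in
    # LLMwithvoice.py; component choices and labels are assumptions.
    return gr.Interface(
        fn=gradio_interface,
        inputs=[
            gr.Textbox(label="Hugging Face API token", type="password"),
            gr.Textbox(label="Question"),
            gr.Textbox(label="Context", lines=5),
        ],
        outputs=[
            gr.Textbox(label="Answer"),
            gr.Audio(label="Spoken answer"),  # second return value is currently None
        ],
    )

if __name__ == "__main__":
    build_demo().launch()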