Update app.py
app.py
CHANGED
@@ -1,8 +1,10 @@
 from huggingface_hub import InferenceClient
 import gradio as gr
+from gtts import gTTS
+import IPython.display as ipd
 
 client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
-system_prompt = "Te llamarás Xaman 2.0
+system_prompt = "Te llamarás Xaman 2.0"
 system_prompt_sent = False
 
 def format_prompt(message, history):
@@ -20,6 +22,11 @@ def format_prompt(message, history):
     prompt += f"[INST] {message} [/INST]"
     return prompt
 
+def text_to_speech(text):
+    tts = gTTS(text=text, lang='es')
+    tts.save('output.mp3')
+    return 'output.mp3'
+
 def generate(
     prompt, history, temperature=0.9, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0,
 ):
@@ -46,6 +53,9 @@ def generate(
     for response in stream:
         output += response.token.text
         yield output
+
+    audio_file = text_to_speech(output)
+    ipd.display(ipd.Audio(audio_file))
     return output
 
 chat_interface = gr.ChatInterface(
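For quick verification outside the Space, a minimal standalone sketch of the text-to-speech path introduced above. It assumes gTTS is installed and, as in the diff, plays the result through IPython.display, so the audio widget only renders when run in a notebook; the sample sentence is a hypothetical stand-in for the model's reply.

from gtts import gTTS
import IPython.display as ipd

def text_to_speech(text):
    # Synthesize Spanish speech with Google TTS and write it to an MP3 file,
    # mirroring the helper added in the diff above.
    tts = gTTS(text=text, lang='es')
    tts.save('output.mp3')
    return 'output.mp3'

# Hypothetical sample text; in the Space this would be the streamed model output.
audio_file = text_to_speech("Hola, soy Xaman 2.0.")
ipd.display(ipd.Audio(audio_file))  # shows an audio player in a notebook environment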