salomonsky committed
Commit 4a766c1 · verified · 1 Parent(s): 0fa2257

Update app.py

Files changed (1):
  app.py +11 -9
app.py CHANGED
@@ -1,6 +1,7 @@
 from huggingface_hub import InferenceClient
 import gradio as gr
 from gtts import gTTS
+import IPython.display as ipd
 
 client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 system_prompt = "Tu nombre es Xaman 3.0"
@@ -27,7 +28,7 @@ def text_to_speech(text):
     return 'output.mp3'
 
 def generate(
-    prompt, history, temperature=None, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0,
+    user_input, history, temperature=None, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0,
 ):
     global system_prompt_sent
     temperature = float(temperature) if temperature is not None else 0.9
@@ -44,7 +45,7 @@ def generate(
         seed=42,
     )
 
-    formatted_prompt = format_prompt(prompt, history)
+    formatted_prompt = format_prompt(user_input, history)
 
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
     output = ""
@@ -56,19 +57,20 @@ def generate(
     text_to_speech(output)
     return output
 
+input_text = gr.Textbox(label="Usuario", placeholder="Escribe aquí tu mensaje")
 output_audio = gr.Audio(type="file", label="Respuesta del chatbot en audio", autoplay=True)
-output_text = gr.Textbox(label="Respuesta del chatbot en texto")
+output_text = gr.Textbox(label="Respuesta del chatbot en texto", readonly=True)
+
+def update_output_text(output):
+    output_text.update(output)
 
 chat_interface = gr.Interface(
     fn=generate,
-    inputs=["text"],
+    inputs=[input_text],
     outputs=[output_audio, output_text],
-    theme="soft",
     live=True,
-    retry_btn=None,
-    undo_btn=None,
-    clear_btn=None,
+    theme="soft",
     submit_btn="Enviar",
 )
 
-chat_interface.launch()
+chat_interface.launch(share=True, debug=True, capture_session=True, callback=update_output_text)
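For context, below is a minimal sketch of how the updated pieces could be wired into a runnable app.py. It is not the committed file: `readonly`, `capture_session`, and the `callback` argument to `launch()` are not documented Gradio options, so the sketch substitutes `interactive=False` and a plain `launch()`, uses `type="filepath"` for the audio output, and has `generate()` return both the audio path and the text so each declared output component receives a value. The Mixtral call is replaced by a placeholder string.

from gtts import gTTS
import gradio as gr

def text_to_speech(text):
    # Same approach as the original helper: synthesize speech and return the file path
    tts = gTTS(text=text, lang="es")
    tts.save("output.mp3")
    return "output.mp3"

def generate(user_input):
    # Placeholder for the streamed Mixtral reply (assumption, not the committed logic)
    output = f"Xaman 3.0: {user_input}"
    audio_path = text_to_speech(output)
    # Return one value per declared output component: (audio, text)
    return audio_path, output

input_text = gr.Textbox(label="Usuario", placeholder="Escribe aquí tu mensaje")
output_audio = gr.Audio(type="filepath", label="Respuesta del chatbot en audio", autoplay=True)
output_text = gr.Textbox(label="Respuesta del chatbot en texto", interactive=False)

chat_interface = gr.Interface(
    fn=generate,
    inputs=[input_text],
    outputs=[output_audio, output_text],
    theme="soft",
)

chat_interface.launch(share=True, debug=True)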