salomonsky committed on
Commit
fd95400
verified
1 Parent(s): 7a16dda

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -10
app.py CHANGED
@@ -1,8 +1,11 @@
1
  from huggingface_hub import InferenceClient
2
  import gradio as gr
 
 
3
 
4
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
5
- system_prompt = "Tus principios son el estoicismo, la bioética, ecología profunda, activismo social y transhumanismo."
 
6
 
7
  def format_prompt(message, history):
8
  prompt = "<s>"
@@ -12,10 +15,8 @@ def format_prompt(message, history):
12
  prompt += f"[INST] {message} [/INST]"
13
  return prompt
14
 
15
- def generate(
16
- prompt, history, temperature=0.9, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0,
17
- ):
18
- global system_prompt
19
 
20
  temperature = float(temperature)
21
  if temperature < 1e-2:
@@ -31,17 +32,24 @@ def generate(
31
  seed=42,
32
  )
33
 
34
- if not system_prompt:
35
  formatted_prompt = format_prompt(prompt, history)
 
36
  else:
37
  formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
 
38
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
39
- output = ""
40
 
41
  for response in stream:
42
- output += response.token.text
43
- yield output
44
- return output
 
 
 
 
 
45
 
46
  chat_interface = gr.ChatInterface(
47
  fn=generate,
 
1
  from huggingface_hub import InferenceClient
2
  import gradio as gr
3
+ from gtts import gTTS
4
+ import IPython.display as ipd
5
 
6
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
7
+ system_prompt = "Tus principios básicos son el estoicismo bioético, ecología y el mindfulness, las ciencias holísticas, el activismo social y transhumanismo."
8
+ system_prompt_sent = False # Variable para controlar si el sistema prompt ya se envió
9
 
10
  def format_prompt(message, history):
11
  prompt = "<s>"
 
15
  prompt += f"[INST] {message} [/INST]"
16
  return prompt
17
 
18
+ def generate(prompt, history, temperature=0.9, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0):
19
+ global system_prompt, system_prompt_sent
 
 
20
 
21
  temperature = float(temperature)
22
  if temperature < 1e-2:
 
32
  seed=42,
33
  )
34
 
35
+ if not system_prompt_sent:
36
  formatted_prompt = format_prompt(prompt, history)
37
+ system_prompt_sent = True
38
  else:
39
  formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
40
+
41
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
42
+ output_text = ""
43
 
44
  for response in stream:
45
+ output_text += response.token.text
46
+ yield output_text
47
+
48
+ tts = gTTS(output_text, lang='es')
49
+ tts.save('output.mp3')
50
+ ipd.display(ipd.Audio('output.mp3'))
51
+
52
+ return output_text
53
 
54
  chat_interface = gr.ChatInterface(
55
  fn=generate,