salomonsky committed on
Commit
1fe1359
·
verified ·
1 Parent(s): f8a4c47

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -26
app.py CHANGED
@@ -1,23 +1,29 @@
1
  from huggingface_hub import InferenceClient
2
  import gradio as gr
3
- from gtts import gTTS
4
- import IPython.display as ipd
5
 
6
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
7
- system_prompt = "Tus principios básicos son el estoicismo bioético, ecología y el mindfullness, la ciencias holísticas, el activismo social y transhumanismo."
8
- system_prompt_sent = False
9
 
10
def format_prompt(message, history):
    """Serialize a chat history into the Mixtral instruction format.

    Each (user, bot) turn becomes "[INST] user [/INST] bot</s> ", and the
    new message is appended as a final open instruction after the "<s>" BOS.
    """
    pieces = ["<s>"]
    for user_turn, bot_turn in history:
        pieces.append(f"[INST] {user_turn} [/INST]")
        pieces.append(f" {bot_turn}</s> ")
    pieces.append(f"[INST] {message} [/INST]")
    return "".join(pieces)
17
 
18
- def generate(prompt, history, temperature=0.9, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0):
19
- global system_prompt, system_prompt_sent
20
-
 
21
  temperature = float(temperature)
22
  if temperature < 1e-2:
23
  temperature = 1e-2
@@ -32,29 +38,20 @@ def generate(prompt, history, temperature=0.9, max_new_tokens=2048, top_p=0.95,
32
  seed=42,
33
  )
34
 
35
- if not system_prompt_sent:
36
- formatted_prompt = format_prompt(prompt, history)
37
- system_prompt_sent = True
38
- else:
39
- formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
40
-
41
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
42
- output_text = ""
43
 
44
  for response in stream:
45
- output_text += response.token.text
46
- yield output_text
47
-
48
- tts = gTTS(output_text, lang='es')
49
- tts.save('output.mp3')
50
-
51
- ipd.display(ipd.Audio('output.mp3'))
52
-
53
- return output_text
54
 
55
- chat_interface_text = gr.ChatInterface(
56
  fn=generate,
57
- chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel", height=350),
 
58
  theme="soft",
59
  retry_btn=None,
60
  undo_btn=None,
@@ -62,4 +59,4 @@ chat_interface_text = gr.ChatInterface(
62
  submit_btn="Enviar",
63
  )
64
 
65
- chat_interface_text.launch(show_api=False)
 
1
  from huggingface_hub import InferenceClient
2
  import gradio as gr
 
 
3
 
4
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
5
system_prompt = "Te llamarás Xaman 2.0 (NO LO REPITAS) tu rol y principios son: estoicismo antropocéntrico, existencialismo trashumanista y bioética holística."
# NOTE(review): legacy flag, no longer consulted by format_prompt; kept so any
# external reader of the module attribute still finds it defined.
system_prompt_sent = False


def format_prompt(message, history):
    """Build the full Mixtral-style prompt: system prompt, history, new message.

    Bug fix: the previous guard checked whether "[INST] {system_prompt} [/INST]"
    appeared *inside a raw user turn* of `history`. History stores the raw user
    text (no [INST] tags), so that substring was never present and the guard
    always passed. The system prompt is therefore prepended unconditionally
    (same runtime output as before), and the dead check together with the
    ineffective `global system_prompt_sent` bookkeeping is removed.

    Args:
        message: The new user message to answer.
        history: Sequence of (user_text, bot_text) pairs from prior turns.

    Returns:
        The serialized prompt string, starting with the "<s>" BOS token.
    """
    prompt = "<s>"
    # The generation API is stateless: every request rebuilds the whole prompt,
    # so the system prompt must lead each formatted request.
    prompt += f"[INST] {system_prompt} [/INST]"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
22
 
23
+ def generate(
24
+ prompt, history, temperature=0.9, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0,
25
+ ):
26
+ global system_prompt_sent
27
  temperature = float(temperature)
28
  if temperature < 1e-2:
29
  temperature = 1e-2
 
38
  seed=42,
39
  )
40
 
41
+ formatted_prompt = format_prompt(prompt, history)
42
+
 
 
 
 
43
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
44
+ output = ""
45
 
46
  for response in stream:
47
+ output += response.token.text
48
+ yield output
49
+ return output
 
 
 
 
 
 
50
 
51
+ chat_interface = gr.ChatInterface(
52
  fn=generate,
53
+ chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=False, likeable=False, layout="vertical", height=700),
54
+ concurrency_limit=9,
55
  theme="soft",
56
  retry_btn=None,
57
  undo_btn=None,
 
59
  submit_btn="Enviar",
60
  )
61
 
62
+ chat_interface.launch(show_api=False)