salomonsky committed on
Commit
c501864
·
verified ·
1 Parent(s): 8e08bf3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -24
app.py CHANGED
@@ -2,33 +2,28 @@ from huggingface_hub import InferenceClient
2
  import gradio as gr
3
 
4
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
5
- system_prompt = "Te llamarás Xaman 2.0 una IA generativa fuerte, explicable y conductual, tus principios son: estoicismo antropocéntrico, existencialismo trashumanista y bioética holística."
6
- pre_prompt = "Dame links revisados de plataformas de IA en generación de imagenes, video y audio así como chatbots libres como chatgpt o bingchat"
7
  system_prompt_sent = False
8
- pre_prompt_sent = False
9
- first_session = True
10
 
11
- def format_prompt(message, history, include_system_prompt=True, include_pre_prompt=True):
 
12
  prompt = "<s>"
13
-
14
- if include_system_prompt:
15
  prompt += f"[INST] {system_prompt} [/INST]"
16
-
17
- if include_pre_prompt:
18
- prompt += f"[INST] {pre_prompt} [/INST]"
19
-
20
  for user_prompt, bot_response in history:
21
  prompt += f"[INST] {user_prompt} [/INST]"
22
  prompt += f" {bot_response}</s> "
23
-
24
  prompt += f"[INST] {message} [/INST]"
25
  return prompt
26
 
27
  def generate(
28
  prompt, history, temperature=0.9, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0,
29
  ):
30
- global system_prompt_sent, pre_prompt_sent, first_session
31
-
32
  temperature = float(temperature)
33
  if temperature < 1e-2:
34
  temperature = 1e-2
@@ -43,14 +38,8 @@ def generate(
43
  seed=42,
44
  )
45
 
46
- formatted_prompt = format_prompt(prompt, history, include_system_prompt=first_session and not system_prompt_sent, include_pre_prompt=first_session and not pre_prompt_sent)
47
 
48
- if first_session:
49
- first_session = False
50
- pre_prompt_sent = True
51
-
52
- system_prompt_sent = True
53
-
54
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
55
  output = ""
56
 
@@ -61,8 +50,8 @@ def generate(
61
 
62
  chat_interface = gr.ChatInterface(
63
  fn=generate,
64
- chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=False, layout="panel", height=700),
65
- concurrency_limit=2,
66
  theme="soft",
67
  retry_btn=None,
68
  undo_btn=None,
@@ -70,4 +59,4 @@ chat_interface = gr.ChatInterface(
70
  submit_btn="Enviar",
71
  )
72
 
73
- chat_interface.launch(show_api=False)
 
2
  import gradio as gr
3
 
4
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
5
+ system_prompt = "Te llamarás Xaman 2.0 (NO LO REPITAS) tu rol como IA y principios son: estoicismo antropocéntrico, existencialismo trashumanista y bioética holística. Complementa tu presentación con links a plataformas de IA de generación de imagenes, video, audio y chatbots como bingchat o chatgpt."
 
6
  system_prompt_sent = False
 
 
7
 
8
+ def format_prompt(message, history):
9
+ global system_prompt_sent
10
  prompt = "<s>"
11
+
12
+ if not any(f"[INST] {system_prompt} [/INST]" in user_prompt for user_prompt, _ in history):
13
  prompt += f"[INST] {system_prompt} [/INST]"
14
+ system_prompt_sent = True
15
+
 
 
16
  for user_prompt, bot_response in history:
17
  prompt += f"[INST] {user_prompt} [/INST]"
18
  prompt += f" {bot_response}</s> "
19
+
20
  prompt += f"[INST] {message} [/INST]"
21
  return prompt
22
 
23
  def generate(
24
  prompt, history, temperature=0.9, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0,
25
  ):
26
+ global system_prompt_sent
 
27
  temperature = float(temperature)
28
  if temperature < 1e-2:
29
  temperature = 1e-2
 
38
  seed=42,
39
  )
40
 
41
+ formatted_prompt = format_prompt(prompt, history)
42
 
 
 
 
 
 
 
43
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
44
  output = ""
45
 
 
50
 
51
  chat_interface = gr.ChatInterface(
52
  fn=generate,
53
+ chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=False, likeable=False, layout="vertical", height=700),
54
+ concurrency_limit=9,
55
  theme="soft",
56
  retry_btn=None,
57
  undo_btn=None,
 
59
  submit_btn="Enviar",
60
  )
61
 
62
+ chat_interface.launch(show_api=False)