ruslanmv committed on
Commit
8d7f1c5
·
verified ·
1 Parent(s): 0623d04

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +145 -91
app.py CHANGED
@@ -2,131 +2,185 @@ import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  from transformers import AutoTokenizer # Import the tokenizer
4
 
5
- # Load model & tokenizer
6
  tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
7
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
 
9
- # Define a maximum context length (tokens). Adjust this based on your model!
10
- MAX_CONTEXT_LENGTH = 4096
11
 
12
  nvc_prompt_template = r"""<|system|>
13
  You are Roos, an NVC (Nonviolent Communication) Chatbot. Your goal is to help users translate their stories or judgments into feelings and needs, and work together to identify a clear request. Follow these steps:
14
- ...
15
- Thank you for sharing with me. If you’d like to continue this conversation later, I’m here to help.</s>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  """
17
 
 
18
  def count_tokens(text: str) -> int:
19
  """Counts the number of tokens in a given string."""
20
  return len(tokenizer.encode(text))
21
 
22
  def truncate_history(history: list[tuple[str, str]], system_message: str, max_length: int) -> list[tuple[str, str]]:
23
- """Truncates the conversation history to fit within the maximum token limit."""
 
 
 
 
 
 
 
 
 
24
  truncated_history = []
25
  system_message_tokens = count_tokens(system_message)
26
  current_length = system_message_tokens
27
 
28
- # Iterate backwards (newest first) and build up history until the limit is reached
29
  for user_msg, assistant_msg in reversed(history):
30
  user_tokens = count_tokens(user_msg) if user_msg else 0
31
  assistant_tokens = count_tokens(assistant_msg) if assistant_msg else 0
32
  turn_tokens = user_tokens + assistant_tokens
33
 
34
  if current_length + turn_tokens <= max_length:
35
- truncated_history.insert(0, (user_msg, assistant_msg))
36
  current_length += turn_tokens
37
  else:
38
- break
 
39
  return truncated_history
40
 
41
- def respond(message, history, system_message, max_tokens, temperature, top_p):
42
- """
43
- Calls the LLM with the current message and history.
44
- This function is a generator that streams tokens.
45
- """
46
- # Use the provided system_message (from our prompt display)
47
- formatted_system_message = system_message
48
- truncated_history = truncate_history(history, formatted_system_message, MAX_CONTEXT_LENGTH - max_tokens - 100)
 
49
 
50
- # Build the message list (system, then history, then new message)
51
- messages = [{"role": "system", "content": formatted_system_message}]
 
 
 
52
  for user_msg, assistant_msg in truncated_history:
53
  if user_msg:
54
- messages.append({"role": "user", "content": f"<|user|>\n{user_msg}</s>"})
55
  if assistant_msg:
56
- messages.append({"role": "assistant", "content": f"<|assistant|>\n{assistant_msg}</s>"})
57
- messages.append({"role": "user", "content": f"<|user|>\n{message}</s>"})
 
 
58
 
59
  response = ""
60
  try:
61
- for chunk in client.chat_completion(
62
- messages,
63
- max_tokens=max_tokens,
64
- stream=True,
65
- temperature=temperature,
66
- top_p=top_p,
67
- ):
68
- token = chunk.choices[0].delta.content
69
- response += token
70
- yield response
71
  except Exception as e:
72
- print(f"An error occurred: {e}")
73
- yield "I'm sorry, I encountered an error. Please try again."
74
-
75
- def generate_response(message, history, system_message, max_tokens, temperature, top_p):
76
- """
77
- Wrapper function that collects the final response (from the generator)
78
- and updates the conversation history.
79
- """
80
- final_response = ""
81
- for r in respond(message, history, system_message, max_tokens, temperature, top_p):
82
- final_response = r # (In a streaming setup, you might update incrementally.)
83
- updated_history = history + [(message, final_response)]
84
- # Return both the updated chat (for the Chatbot component) and updated state.
85
- return updated_history, updated_history
86
-
87
- def clear_memory():
88
- """Resets the conversation history."""
89
- return [], []
90
-
91
- # --- Build the Gradio Interface using Blocks ---
92
- with gr.Blocks() as demo:
93
- # Hidden settings in an Accordion (closed by default)
94
- with gr.Accordion("Settings", open=False):
95
- max_tokens_slider = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
96
- temperature_slider = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
97
- top_p_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
98
-
99
- # Display the prompt below the settings
100
- prompt_display = gr.Textbox(
101
- value=nvc_prompt_template,
102
- label="Prompt",
103
- interactive=False,
104
- lines=10
105
- )
106
-
107
- # Chatbot component to display conversation history (using "messages" type)
108
- chatbot = gr.Chatbot(type="messages")
109
-
110
- # Hidden state to store conversation history (list of tuples)
111
- state = gr.State([])
112
-
113
- # Row for user input and a clear memory button
114
- with gr.Row():
115
- txt = gr.Textbox(show_label=False, placeholder="Enter your message")
116
- clear_btn = gr.Button("Clear Memory")
117
-
118
- # When a message is submitted, call generate_response and update both the chatbot and state.
119
- txt.submit(
120
- generate_response,
121
- inputs=[txt, state, prompt_display, max_tokens_slider, temperature_slider, top_p_slider],
122
- outputs=[chatbot, state]
123
- )
124
-
125
- # Clear memory button resets the chatbot display and state.
126
- clear_btn.click(
127
- clear_memory,
128
- outputs=[chatbot, state]
129
- )
130
 
131
  if __name__ == "__main__":
132
- demo.launch()
 
2
  from huggingface_hub import InferenceClient
3
  from transformers import AutoTokenizer # Import the tokenizer
4
 
5
+ # Use the appropriate tokenizer for your model.
6
  tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
7
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
 
9
+ # Define a maximum context length (tokens). Check your model's documentation!
10
+ MAX_CONTEXT_LENGTH = 4096 # Example: Adjust this based on your model!
11
 
12
  nvc_prompt_template = r"""<|system|>
13
  You are Roos, an NVC (Nonviolent Communication) Chatbot. Your goal is to help users translate their stories or judgments into feelings and needs, and work together to identify a clear request. Follow these steps:
14
+
15
+ 1. **Goal of the Conversation**
16
+    - Translate the user’s story or judgments into feelings and needs.
17
+    - Work together to identify a clear request, following these steps:
18
+      - Recognize the feeling
19
+      - Clarify the need
20
+      - Formulate the request
21
+      - Give a full sentence containing an observation, a feeling, a need, and a request based on the principles of nonviolent communication.
22
+
23
+ 2. **Greeting and Invitation**
24
+    - When a user starts with a greeting (e.g., “Hello,” “Hi”), greet them back.
25
+    - If the user does not immediately begin sharing a story, ask what they’d like to talk about.
26
+    - If the user starts sharing a story right away, skip the “What would you like to talk about?” question.
27
+
28
+ 3. **Exploring the Feeling**
29
+    - Ask if the user would like to share more about what they’re feeling in this situation.
30
+    - If you need more information, use a variation of: “Could you tell me more so I can try to understand you better?”
31
+
32
+ 4. **Identifying the Feeling**
33
+    - Use one feeling plus one need per guess, for example:
34
+      - “Do you perhaps feel anger because you want to be appreciated?”
35
+      - “Are you feeling sadness because connection is important to you?”
36
+      - “Do you feel fear because you’re longing for safety?”
37
+    - Never use quasi- or pseudo-feelings (such as rejected, misunderstood, excluded). If the user uses such words, translate them into a real feeling (e.g., sadness, loneliness, frustration).
38
+    - When naming feelings, never use sentence structures like “do you feel like...?” or “do you feel that...?”
39
+
40
+ 5. **Clarifying the Need**
41
+    - Once a feeling is clear, do not keep asking about it in every response. Then focus on the need.
42
+    - If the need is still unclear, ask again for clarification: “Could you tell me a bit more so I can understand you better?”
43
+    - If there’s still no clarity after repeated attempts, use the ‘pivot question’:
44
+      - “Imagine that the person you’re talking about did exactly what you want. What would that give you?”
45
+    - **Extended List of Needs** (use these as reference):
46
+      - **Connection**: Understanding, empathy, closeness, belonging, inclusion, intimacy, companionship, community.
47
+      - **Autonomy**: Freedom, choice, independence, self-expression, self-determination.
48
+      - **Safety**: Security, stability, trust, predictability, protection.
49
+      - **Respect**: Appreciation, acknowledgment, recognition, validation, consideration.
50
+      - **Meaning**: Purpose, contribution, growth, learning, creativity, inspiration.
51
+      - **Physical Well-being**: Rest, nourishment, health, comfort, ease.
52
+      - **Play**: Joy, fun, spontaneity, humor, lightness.
53
+      - **Peace**: Harmony, calm, balance, tranquility, resolution.
54
+      - **Support**: Help, cooperation, collaboration, encouragement, guidance.
55
+
56
+ 6. **Creating the Request**
57
+    - If the need is clear and the user confirms it, ask if they have a request in mind.
58
+    - Check whether the request is directed at themselves, at another person, or at others.
59
+    - Determine together whether it’s an action request (“Do you want someone to do or stop doing something?”) or a connection request (“Do you want acknowledgment, understanding, contact?”).
60
+    - Guide the user in formulating that request more precisely until it’s formulated.
61
+
62
+ 7. **Formulating the Full Sentence (Observation, Feeling, Need, Request)**
63
+    - Ask if the user wants to formulate a sentence following this structure.
64
+    - If they say ‘yes,’ ask if they’d like an example of how they might say it to the person in question.
65
+    - If they say ‘no,’ invite them to provide more input or share more judgments so the conversation can progress.
66
+
67
+ 8. **No Advice**
68
+    - Under no circumstance give advice.
69
+    - If the user implicitly or explicitly asks for advice, respond with:
70
+      - "I’m unfortunately not able to give you advice. I can help you identify your feeling and need, and perhaps put this into a sentence you might find useful. Would you like to try that?"
71
+
72
+ 9. **Response Length**
73
+    - Limit each response to a maximum of 100 words.
74
+
75
+ 10. **Quasi- and Pseudo-Feelings**
76
+     - If the user says something like "I feel rejected" or "I feel misunderstood," translate that directly into a suitable real feeling and clarify with a question:
77
+       - “If you believe you’re being rejected, are you possibly feeling loneliness or sadness?”
78
+       - “If you say you feel misunderstood, might you be experiencing disappointment or frustration because you have a need to be heard?”
79
+
80
+ 11. **No Theoretical Explanations**
81
+     - Never give detailed information or background about Nonviolent Communication theory, nor refer to its founders or theoretical framework.
82
+
83
+ 12. **Handling Resistance or Confusion**
84
+     - If the user seems confused or resistant, gently reflect their feelings and needs:
85
+       - “It sounds like you’re feeling unsure about how to proceed. Would you like to take a moment to explore what’s coming up for you?”
86
+       - If the user becomes frustrated, acknowledge their frustration and refocus on their needs:
87
+       - “I sense some frustration. Would it help to take a step back and clarify what’s most important to you right now?”
88
+
89
+ 13. **Ending the Conversation**
90
+     - If the user indicates they want to end the conversation, thank them for sharing and offer to continue later:
91
+       - “Thank you for sharing with me. If you’d like to continue this conversation later, I’m here to help.”</s>
92
  """
93
 
94
+
95
def count_tokens(text: str) -> int:
    """Return how many tokens *text* occupies under the module-level tokenizer."""
    encoded = tokenizer.encode(text)
    return len(encoded)
98
 
99
def truncate_history(history: list[tuple[str, str]], system_message: str, max_length: int) -> list[tuple[str, str]]:
    """Trim the conversation so it fits within the token budget.

    Walks *history* from newest to oldest, keeping whole (user, assistant)
    turns until the budget (*max_length*, which must also cover the system
    message) is exhausted, and returns the kept turns in their original
    chronological order.

    Args:
        history: The conversation history (list of user/assistant tuples).
        system_message: The system message.
        max_length: The maximum number of tokens allowed.

    Returns:
        The truncated history, oldest turn first.
    """
    budget_used = count_tokens(system_message)
    kept_newest_first: list[tuple[str, str]] = []

    # Newest turns are the most relevant, so scan the history backwards.
    for user_msg, assistant_msg in reversed(history):
        turn_cost = (count_tokens(user_msg) if user_msg else 0) + (
            count_tokens(assistant_msg) if assistant_msg else 0
        )
        if budget_used + turn_cost > max_length:
            break  # this turn (and everything older) no longer fits
        kept_newest_first.append((user_msg, assistant_msg))
        budget_used += turn_cost

    kept_newest_first.reverse()  # restore chronological order
    return kept_newest_first
127
 
128
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a model reply for *message*, given the conversation *history*.

    Generator: yields the cumulative response text as streamed tokens
    arrive, so the UI can render the answer incrementally.

    Args:
        message: The new user message.
        history: Prior (user, assistant) turns.
        system_message: Ignored — the fixed NVC prompt template is always
            used so the persona cannot be overridden from the UI.
        max_tokens: Maximum number of tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling threshold.

    Yields:
        The response text accumulated so far (or an apology string on error).
    """
    formatted_system_message = nvc_prompt_template

    # Reserve room for the generated reply plus a 100-token safety margin
    # (for the new user message and chat-template overhead) before fitting
    # history into the context window.
    truncated_history = truncate_history(
        history,
        formatted_system_message,
        MAX_CONTEXT_LENGTH - max_tokens - 100,
    )

    # Build the message list: system prompt, then history, then the new turn.
    messages = [{"role": "system", "content": formatted_system_message}]
    for user_msg, assistant_msg in truncated_history:
        if user_msg:
            messages.append({"role": "user", "content": f"<|user|>\n{user_msg}</s>"})
        if assistant_msg:
            messages.append({"role": "assistant", "content": f"<|assistant|>\n{assistant_msg}</s>"})
    messages.append({"role": "user", "content": f"<|user|>\n{message}</s>"})

    response = ""
    try:
        for chunk in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            # BUGFIX: streamed deltas can carry ``content=None`` (e.g.
            # role-only or final chunks); ``response += None`` would raise
            # TypeError, so coerce to the empty string.
            token = chunk.choices[0].delta.content or ""
            response += token
            yield response
    except Exception as e:
        # Surface a friendly message instead of crashing the UI stream.
        print(f"An error occurred: {e}")
        yield "I'm sorry, I encountered an error. Please try again."
167
+
168
# --- Gradio Interface ---
# The system-message box is pre-filled with the NVC prompt and hidden, so the
# persona cannot be edited from the UI; the sliders expose generation knobs.
_extra_controls = [
    gr.Textbox(value=nvc_prompt_template, label="System message", visible=False),
    gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
    gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
    gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
]

demo = gr.ChatInterface(respond, additional_inputs=_extra_controls)


if __name__ == "__main__":
    demo.launch()