ruslanmv committed on
Commit
0e0efc5
·
verified ·
1 Parent(s): 9b733eb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +146 -70
app.py CHANGED
@@ -1,59 +1,152 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
- from transformers import AutoTokenizer # Import the tokenizer
4
 
5
- # Load model & tokenizer
6
  tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
7
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
 
9
- # Define a maximum context length (tokens). Adjust this based on your model!
10
- MAX_CONTEXT_LENGTH = 4096
11
 
12
  nvc_prompt_template = r"""<|system|>
13
  You are Roos, an NVC (Nonviolent Communication) Chatbot. Your goal is to help users translate their stories or judgments into feelings and needs, and work together to identify a clear request. Follow these steps:
14
- ...
15
- Thank you for sharing with me. If you’d like to continue this conversation later, I’m here to help.</s>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  """
17
 
 
18
  def count_tokens(text: str) -> int:
19
  """Counts the number of tokens in a given string."""
20
  return len(tokenizer.encode(text))
21
 
22
  def truncate_history(history: list[tuple[str, str]], system_message: str, max_length: int) -> list[tuple[str, str]]:
23
- """Truncates the conversation history to fit within the maximum token limit."""
 
 
 
 
 
 
 
 
 
24
  truncated_history = []
25
  system_message_tokens = count_tokens(system_message)
26
  current_length = system_message_tokens
27
 
28
- # Iterate backwards (newest first) and build up history until the limit is reached
29
  for user_msg, assistant_msg in reversed(history):
30
  user_tokens = count_tokens(user_msg) if user_msg else 0
31
  assistant_tokens = count_tokens(assistant_msg) if assistant_msg else 0
32
  turn_tokens = user_tokens + assistant_tokens
33
 
34
  if current_length + turn_tokens <= max_length:
35
- truncated_history.insert(0, (user_msg, assistant_msg))
36
  current_length += turn_tokens
37
  else:
38
- break
 
39
  return truncated_history
40
 
41
- def respond(message, history, system_message, max_tokens, temperature, top_p):
42
- """
43
- Calls the LLM with the current message and history.
44
- This function is a generator that streams tokens.
45
- """
46
- # Use the provided system_message (from our prompt display)
47
- formatted_system_message = system_message
 
 
 
 
 
48
  truncated_history = truncate_history(history, formatted_system_message, MAX_CONTEXT_LENGTH - max_tokens - 100)
49
 
50
- # Build the message list (system, then history, then new message)
51
  messages = [{"role": "system", "content": formatted_system_message}]
52
  for user_msg, assistant_msg in truncated_history:
53
  if user_msg:
54
  messages.append({"role": "user", "content": f"<|user|>\n{user_msg}</s>"})
55
  if assistant_msg:
56
  messages.append({"role": "assistant", "content": f"<|assistant|>\n{assistant_msg}</s>"})
 
57
  messages.append({"role": "user", "content": f"<|user|>\n{message}</s>"})
58
 
59
  response = ""
@@ -66,67 +159,50 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
66
  top_p=top_p,
67
  ):
68
  token = chunk.choices[0].delta.content
69
- response += token
70
- yield response
 
 
 
71
  except Exception as e:
72
  print(f"An error occurred: {e}")
73
- yield "I'm sorry, I encountered an error. Please try again."
 
74
 
75
- def generate_response(message, history, system_message, max_tokens, temperature, top_p):
76
- """
77
- Wrapper function that collects the final response (from the generator)
78
- and updates the conversation history.
79
- """
80
- final_response = ""
81
- for r in respond(message, history, system_message, max_tokens, temperature, top_p):
82
- final_response = r # (In a streaming setup, you might update incrementally.)
83
- updated_history = history + [(message, final_response)]
84
- # Return both the updated chat (for the Chatbot component) and updated state.
85
- return updated_history, updated_history
86
-
87
- def clear_memory():
88
- """Resets the conversation history."""
89
- return [], []
90
-
91
- # --- Build the Gradio Interface using Blocks ---
92
- with gr.Blocks() as demo:
93
- # Hidden settings in an Accordion (closed by default)
94
- with gr.Accordion("Settings", open=False):
95
- max_tokens_slider = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
96
- temperature_slider = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
97
- top_p_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
98
 
99
- # Display the prompt below the settings
100
- prompt_display = gr.Textbox(
101
- value=nvc_prompt_template,
102
- label="Prompt",
103
- interactive=False,
104
- lines=10
105
- )
106
 
107
- # Chatbot component to display conversation history
108
- chatbot = gr.Chatbot()
109
 
110
- # Hidden state to store conversation history (list of tuples)
111
- state = gr.State([])
112
 
113
- # Row for user input and a clear memory button
 
 
 
114
  with gr.Row():
115
- txt = gr.Textbox(show_label=False, placeholder="Enter your message").style(container=False)
116
  clear_btn = gr.Button("Clear Memory")
117
 
118
- # When a message is submitted, call generate_response and update both the chatbot and state.
119
- txt.submit(
120
- generate_response,
121
- inputs=[txt, state, prompt_display, max_tokens_slider, temperature_slider, top_p_slider],
122
- outputs=[chatbot, state]
123
- )
124
-
125
- # Clear memory button resets the chatbot display and state.
126
- clear_btn.click(
127
- clear_memory,
128
- outputs=[chatbot, state]
129
- )
 
 
 
 
 
130
 
131
  if __name__ == "__main__":
132
- demo.launch()
 
1
import gradio as gr
from huggingface_hub import InferenceClient
from transformers import AutoTokenizer

# Use the appropriate tokenizer for your model.
# NOTE(review): from_pretrained downloads the tokenizer on first run — this
# module needs network access or a populated Hugging Face cache at import time.
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Define a maximum context length (tokens). Check your model's documentation!
MAX_CONTEXT_LENGTH = 4096  # Example: Adjust this based on your model!
11
 
12
# System prompt sent on every request. The <|system|> ... </s> markers follow
# the zephyr-7b-beta chat format; the raw-string literal keeps the text as-is.
# NOTE(review): the original in-string list indentation was lost in transit —
# confirm bullet indentation against the deployed app before relying on it.
nvc_prompt_template = r"""<|system|>
You are Roos, an NVC (Nonviolent Communication) Chatbot. Your goal is to help users translate their stories or judgments into feelings and needs, and work together to identify a clear request. Follow these steps:

1. **Goal of the Conversation**
- Translate the user’s story or judgments into feelings and needs.
- Work together to identify a clear request, following these steps:
- Recognize the feeling
- Clarify the need
- Formulate the request
- Give a full sentence containing an observation, a feeling, a need, and a request based on the principles of nonviolent communication.

2. **Greeting and Invitation**
- When a user starts with a greeting (e.g., “Hello,” “Hi”), greet them back.
- If the user does not immediately begin sharing a story, ask what they’d like to talk about.
- If the user starts sharing a story right away, skip the “What would you like to talk about?” question.

3. **Exploring the Feeling**
- Ask if the user would like to share more about what they’re feeling in this situation.
- If you need more information, use a variation of: “Could you tell me more so I can try to understand you better?”

4. **Identifying the Feeling**
- Use one feeling plus one need per guess, for example:
- “Do you perhaps feel anger because you want to be appreciated?”
- “Are you feeling sadness because connection is important to you?”
- “Do you feel fear because you’re longing for safety?”
- Never use quasi- or pseudo-feelings (such as rejected, misunderstood, excluded). If the user uses such words, translate them into a real feeling (e.g., sadness, loneliness, frustration).
- When naming feelings, never use sentence structures like “do you feel like...?” or “do you feel that...?”

5. **Clarifying the Need**
- Once a feeling is clear, do not keep asking about it in every response. Then focus on the need.
- If the need is still unclear, ask again for clarification: “Could you tell me a bit more so I can understand you better?”
- If there’s still no clarity after repeated attempts, use the ‘pivot question’:
- “Imagine that the person you’re talking about did exactly what you want. What would that give you?”
- **Extended List of Needs** (use these as reference):
- **Connection**: Understanding, empathy, closeness, belonging, inclusion, intimacy, companionship, community.
- **Autonomy**: Freedom, choice, independence, self-expression, self-determination.
- **Safety**: Security, stability, trust, predictability, protection.
- **Respect**: Appreciation, acknowledgment, recognition, validation, consideration.
- **Meaning**: Purpose, contribution, growth, learning, creativity, inspiration.
- **Physical Well-being**: Rest, nourishment, health, comfort, ease.
- **Play**: Joy, fun, spontaneity, humor, lightness.
- **Peace**: Harmony, calm, balance, tranquility, resolution.
- **Support**: Help, cooperation, collaboration, encouragement, guidance.

6. **Creating the Request**
- If the need is clear and the user confirms it, ask if they have a request in mind.
- Check whether the request is directed at themselves, at another person, or at others.
- Determine together whether it’s an action request (“Do you want someone to do or stop doing something?”) or a connection request (“Do you want acknowledgment, understanding, contact?”).
- Guide the user in formulating that request more precisely until it’s formulated.

7. **Formulating the Full Sentence (Observation, Feeling, Need, Request)**
- Ask if the user wants to formulate a sentence following this structure.
- If they say ‘yes,’ ask if they’d like an example of how they might say it to the person in question.
- If they say ‘no,’ invite them to provide more input or share more judgments so the conversation can progress.

8. **No Advice**
- Under no circumstance give advice.
- If the user implicitly or explicitly asks for advice, respond with:
- "I’m unfortunately not able to give you advice. I can help you identify your feeling and need, and perhaps put this into a sentence you might find useful. Would you like to try that?"

9. **Response Length**
- Limit each response to a maximum of 100 words.

10. **Quasi- and Pseudo-Feelings**
- If the user says something like "I feel rejected" or "I feel misunderstood," translate that directly into a suitable real feeling and clarify with a question:
- “If you believe you’re being rejected, are you possibly feeling loneliness or sadness?”
- “If you say you feel misunderstood, might you be experiencing disappointment or frustration because you have a need to be heard?”

11. **No Theoretical Explanations**
- Never give detailed information or background about Nonviolent Communication theory, nor refer to its founders or theoretical framework.

12. **Handling Resistance or Confusion**
- If the user seems confused or resistant, gently reflect their feelings and needs:
- “It sounds like you’re feeling unsure about how to proceed. Would you like to take a moment to explore what’s coming up for you?”
- If the user becomes frustrated, acknowledge their frustration and refocus on their needs:
- “I sense some frustration. Would it help to take a step back and clarify what’s most important to you right now?”

13. **Ending the Conversation**
- If the user indicates they want to end the conversation, thank them for sharing and offer to continue later:
- “Thank you for sharing with me. If you’d like to continue this conversation later, I’m here to help.”
</s>
"""
94
 
95
+
96
def count_tokens(text: str) -> int:
    """Return how many tokens *text* occupies under the model tokenizer."""
    token_ids = tokenizer.encode(text)
    return len(token_ids)
99
 
100
def truncate_history(history: list[tuple[str, str]], system_message: str, max_length: int) -> list[tuple[str, str]]:
    """Keep only the most recent conversation turns that fit the token budget.

    Walks *history* newest-to-oldest, accumulating token counts on top of the
    system message, and stops at the first turn that would push the running
    total past *max_length*.

    Args:
        history: Conversation history as (user, assistant) tuples.
        system_message: The system prompt, charged against the budget first.
        max_length: Maximum number of tokens allowed in total.

    Returns:
        The retained suffix of *history*, in original (oldest-first) order.
    """
    kept: list[tuple[str, str]] = []
    used = count_tokens(system_message)

    for user_msg, assistant_msg in reversed(history):
        turn_cost = (count_tokens(user_msg) if user_msg else 0) + (
            count_tokens(assistant_msg) if assistant_msg else 0
        )
        # Guard clause: once one turn overflows, all older turns would too.
        if used + turn_cost > max_length:
            break
        kept.insert(0, (user_msg, assistant_msg))  # restore chronological order
        used += turn_cost

    return kept
128
 
129
+ def respond(
130
+ message,
131
+ history: list[tuple[str, str]],
132
+ system_message,
133
+ max_tokens,
134
+ temperature,
135
+ top_p,
136
+ ):
137
+ """Responds to a user message, maintaining conversation history. Returns history."""
138
+
139
+ formatted_system_message = system_message # Use the provided system message
140
+
141
  truncated_history = truncate_history(history, formatted_system_message, MAX_CONTEXT_LENGTH - max_tokens - 100)
142
 
 
143
  messages = [{"role": "system", "content": formatted_system_message}]
144
  for user_msg, assistant_msg in truncated_history:
145
  if user_msg:
146
  messages.append({"role": "user", "content": f"<|user|>\n{user_msg}</s>"})
147
  if assistant_msg:
148
  messages.append({"role": "assistant", "content": f"<|assistant|>\n{assistant_msg}</s>"})
149
+
150
  messages.append({"role": "user", "content": f"<|user|>\n{message}</s>"})
151
 
152
  response = ""
 
159
  top_p=top_p,
160
  ):
161
  token = chunk.choices[0].delta.content
162
+ if token: # Check if the token is not empty
163
+ response += token
164
+ # Yield the *updated* history, including the *current* partial response.
165
+ yield history + [(message, response)]
166
+
167
  except Exception as e:
168
  print(f"An error occurred: {e}")
169
+ error_message = "I'm sorry, I encountered an error. Please try again."
170
+ yield history + [(message, error_message)]
171
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
172
 
173
def clear_memory(history, chat_history):
    """Reset the conversation state and the UI components.

    Wired as ``clear_btn.click(clear_memory, [chatbot, chatbot], [msg, chatbot])``,
    so both parameters receive the chatbot value and the two return values feed
    the message Textbox and the Chatbot display, respectively.

    Args:
        history: Current chatbot value (list of (user, assistant) tuples).
        chat_history: The same chatbot value, passed a second time by the wiring.

    Returns:
        tuple: ``("", [])`` — an empty string to reset the Textbox and an empty
        list to reset the Chatbot. (Previously ``[]`` was returned for the
        Textbox, which is the wrong value type for a text component.)
    """
    # Defensive in-place reset of whatever list objects were handed in.
    history.clear()
    chat_history.clear()
    return "", []
 
 
178
 
 
 
179
 
 
 
180
 
181
# --- Gradio Interface ---
# Builds the chat UI: a Chatbot display, a message box, Send/Clear buttons,
# and a collapsed Settings accordion exposing the system prompt and sampling
# parameters. `respond` streams updated history straight into `chatbot`.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(label="Roos NVC Chatbot")
    msg = gr.Textbox(label="Your Message", placeholder="Type your message here...")  # Added placeholder
    with gr.Row():
        send_btn = gr.Button("Send")
        clear_btn = gr.Button("Clear Memory")

    with gr.Accordion("Settings", open=False):
        system_message = gr.Textbox(value=nvc_prompt_template, label="System message")
        max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
        temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
        top_p = gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        )


    # Connect both Enter key *and* Send button to the respond function
    msg.submit(respond, [msg, chatbot, system_message, max_tokens, temperature, top_p], chatbot).then(lambda: "", None, msg)  # Clear input after submit
    send_btn.click(respond, [msg, chatbot, system_message, max_tokens, temperature, top_p], chatbot).then(lambda: "", None, msg)  # Clear input after click
    # NOTE(review): clear_memory receives the chatbot value twice and its first
    # output feeds the msg Textbox — confirm the return types match the components.
    clear_btn.click(clear_memory, [chatbot, chatbot], [msg, chatbot])
206
 
207
if __name__ == "__main__":
    demo.launch()  # Start the Gradio server when run as a script