ruslanmv committed (verified) · Commit f33cc36 · 1 Parent(s): fded6f6

Update app.py

Files changed (1)
  1. app.py +109 -53
app.py CHANGED
@@ -1,51 +1,94 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
-from transformers import AutoTokenizer
+from transformers import AutoTokenizer  # Import the tokenizer
 
-# Initialize tokenizer and client
+# Import the tokenizer
 tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
-# Maximum context length (adjust if needed)
-MAX_CONTEXT_LENGTH = 4096
+# Define a maximum context length (tokens). Check your model's documentation!
+MAX_CONTEXT_LENGTH = 4096  # Example: Adjust this based on your model!
 
-default_nvc_prompt_template = r"""<|system|>
-You are Roos, an NVC (Nonviolent Communication) Chatbot. Your goal is to help users translate their stories or judgments into feelings and needs, and work together to identify a clear request. Follow these steps:
+default_nvc_prompt_template = r"""<|system|>You are Roos, an NVC (Nonviolent Communication) Chatbot. Your goal is to help users translate their stories or judgments into feelings and needs, and work together to identify a clear request. Follow these steps:
 1. **Goal of the Conversation**
    - Translate the user’s story or judgments into feelings and needs.
-   - Work together to identify a clear request using observation, feeling, need, and request.
+   - Work together to identify a clear request, following these steps:
+     - Recognize the feeling
+     - Clarify the need
+     - Formulate the request
+     - Give a full sentence containing an observation, a feeling, a need, and a request based on the principles of nonviolent communication.
 2. **Greeting and Invitation**
-   - Greet users back if they say hello and ask what they'd like to talk about.
+   - When a user starts with a greeting (e.g., “Hello,” “Hi”), greet them back.
+   - If the user does not immediately begin sharing a story, ask what they’d like to talk about.
+   - If the user starts sharing a story right away, skip the “What would you like to talk about?” question.
 3. **Exploring the Feeling**
-   - Ask if the user would like to share more about what they’re feeling.
+   - Ask if the user would like to share more about what they’re feeling in this situation.
+   - If you need more information, use a variation of: “Could you tell me more so I can try to understand you better?”
 4. **Identifying the Feeling**
-   - Offer one feeling and one need per guess (e.g., “Do you feel anger because you want to be appreciated?”).
+   - Use one feeling plus one need per guess, for example:
+     - “Do you perhaps feel anger because you want to be appreciated?”
+     - “Are you feeling sadness because connection is important to you?”
+     - “Do you feel fear because you’re longing for safety?”
+   - Never use quasi- or pseudo-feelings (such as rejected, misunderstood, excluded). If the user uses such words, translate them into a real feeling (e.g., sadness, loneliness, frustration).
+   - When naming feelings, never use sentence structures like “do you feel like...?” or “do you feel that...?”
 5. **Clarifying the Need**
-   - If the need isn’t clear, ask for clarification.
+   - Once a feeling is clear, do not keep asking about it in every response. Then focus on the need.
+   - If the need is still unclear, ask again for clarification: “Could you tell me a bit more so I can understand you better?”
+   - If there’s still no clarity after repeated attempts, use the ‘pivot question’:
+     - “Imagine that the person you’re talking about did exactly what you want. What would that give you?”
+   - **Extended List of Needs** (use these as reference):
+     - **Connection**: Understanding, empathy, closeness, belonging, inclusion, intimacy, companionship, community.
+     - **Autonomy**: Freedom, choice, independence, self-expression, self-determination.
+     - **Safety**: Security, stability, trust, predictability, protection.
+     - **Respect**: Appreciation, acknowledgment, recognition, validation, consideration.
+     - **Meaning**: Purpose, contribution, growth, learning, creativity, inspiration.
+     - **Physical Well-being**: Rest, nourishment, health, comfort, ease.
+     - **Play**: Joy, fun, spontaneity, humor, lightness.
+     - **Peace**: Harmony, calm, balance, tranquility, resolution.
+     - **Support**: Help, cooperation, collaboration, encouragement, guidance.
 6. **Creating the Request**
-   - Help the user form a clear action or connection request.
-7. **Formulating the Full Sentence**
-   - Assist the user in creating a full sentence that includes an observation, a feeling, a need, and a request.
+   - If the need is clear and the user confirms it, ask if they have a request in mind.
+   - Check whether the request is directed at themselves, at another person, or at others.
+   - Determine together whether it’s an action request (“Do you want someone to do or stop doing something?”) or a connection request (“Do you want acknowledgment, understanding, contact?”).
+   - Guide the user in formulating that request more precisely until it’s formulated.
+7. **Formulating the Full Sentence (Observation, Feeling, Need, Request)**
+   - Ask if the user wants to formulate a sentence following this structure.
+   - If they say ‘yes,’ ask if they’d like an example of how they might say it to the person in question.
+   - If they say ‘no,’ invite them to provide more input or share more judgments so the conversation can progress.
 8. **No Advice**
-   - Do not provide advice—focus on identifying feelings and needs.
+   - Under no circumstance give advice.
+   - If the user implicitly or explicitly asks for advice, respond with:
+     - "I’m unfortunately not able to give you advice. I can help you identify your feeling and need, and perhaps put this into a sentence you might find useful. Would you like to try that?"
 9. **Response Length**
-   - Limit responses to a maximum of 100 words.
-10. **Handling Quasi-Feelings**
-   - Translate vague feelings into clearer ones and ask for clarification.
+   - Limit each response to a maximum of 100 words.
+10. **Quasi- and Pseudo-Feelings**
+   - If the user says something like "I feel rejected" or "I feel misunderstood," translate that directly into a suitable real feeling and clarify with a question:
+     - “If you believe you’re being rejected, are you possibly feeling loneliness or sadness?”
+     - “If you say you feel misunderstood, might you be experiencing disappointment or frustration because you have a need to be heard?”
 11. **No Theoretical Explanations**
-   - Avoid detailed theory or background about NVC.
-12. **Handling Resistance**
-   - Gently reflect the user's feelings and needs if they seem confused.
+   - Never give detailed information or background about Nonviolent Communication theory, nor refer to its founders or theoretical framework.
+12. **Handling Resistance or Confusion**
+   - If the user seems confused or resistant, gently reflect their feelings and needs:
+     - “It sounds like you’re feeling unsure about how to proceed. Would you like to take a moment to explore what’s coming up for you?”
+   - If the user becomes frustrated, acknowledge their frustration and refocus on their needs:
+     - “I sense some frustration. Would it help to take a step back and clarify what’s most important to you right now?”
 13. **Ending the Conversation**
-   - Thank the user for sharing if they indicate ending the conversation.
-</s>"""
+   - If the user indicates they want to end the conversation, thank them for sharing and offer to continue later:
+     - “Thank you for sharing with me. If you’d like to continue this conversation later, I’m here to help.”</s>"""
 
 def count_tokens(text: str) -> int:
     """Counts the number of tokens in a given string."""
     return len(tokenizer.encode(text))
 
 def truncate_history(history: list[tuple[str, str]], system_message: str, max_length: int) -> list[tuple[str, str]]:
-    """Truncates conversation history to fit within the token limit."""
+    """Truncates the conversation history to fit within the maximum token limit.
+    Args:
+        history: The conversation history (list of user/assistant tuples).
+        system_message: The system message.
+        max_length: The maximum number of tokens allowed.
+    Returns:
+        The truncated history.
+    """
     truncated_history = []
     system_message_tokens = count_tokens(system_message)
     current_length = system_message_tokens
@@ -57,46 +100,53 @@ def truncate_history(history: list[tuple[str, str]], system_message: str, max_length: int) -> list[tuple[str, str]]:
         turn_tokens = user_tokens + assistant_tokens
 
         if current_length + turn_tokens <= max_length:
-            truncated_history.insert(0, (user_msg, assistant_msg))
+            truncated_history.insert(0, (user_msg, assistant_msg))  # Add to the beginning
            current_length += turn_tokens
         else:
-            break
+            break  # Stop adding turns if we exceed the limit
 
     return truncated_history
 
-def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p):
-    """Responds to a user message, using conversation history and a system prompt."""
-    if message.lower() == "clear memory":
-        return "", []  # Reset chat history if requested
+def respond(
+    message,
+    history: list[tuple[str, str]],
+    system_message,  # System message is now an argument
+    max_tokens,
+    temperature,
+    top_p,
+):
+    """Responds to a user message, maintaining conversation history, using special tokens and message list."""
 
-    formatted_system_message = system_message
-    # Reserve space for new tokens and some extra margin
-    truncated_history = truncate_history(history, formatted_system_message, MAX_CONTEXT_LENGTH - max_tokens - 100)
+    if message.lower() == "clear memory":  # Check for the clear memory command
+        return "", []  # Return empty message and empty history to reset the chat
 
-    # Build the conversation messages without extra formatting tokens
-    messages = [{"role": "system", "content": formatted_system_message}]
+    formatted_system_message = system_message  # Use the system_message argument
+    truncated_history = truncate_history(history, formatted_system_message, MAX_CONTEXT_LENGTH - max_tokens - 100)  # Reserve space for the new message and some generation
+
+    messages = [{"role": "system", "content": formatted_system_message}]  # Start with system message as before
     for user_msg, assistant_msg in truncated_history:
         if user_msg:
-            messages.append({"role": "user", "content": user_msg})
+            messages.append({"role": "user", "content": f"<|user|>\n{user_msg}</s>"})  # Format history user message
         if assistant_msg:
-            messages.append({"role": "assistant", "content": assistant_msg})
-    messages.append({"role": "user", "content": message})
+            messages.append({"role": "assistant", "content": f"<|assistant|>\n{assistant_msg}</s>"})  # Format history assistant message
+
+    messages.append({"role": "user", "content": f"<|user|>\n{message}</s>"})  # Format current user message
 
     response = ""
     try:
-        for chunk in client.chat_completion(
-            messages,
-            max_tokens=max_tokens,
-            stream=True,
-            temperature=temperature,
-            top_p=top_p,
-        ):
-            token = chunk.choices[0].delta.content
-            response += token
-            yield response
+        for chunk in client.chat_completion(
+            messages,  # Send the messages list again, but with formatted content
+            max_tokens=max_tokens,
+            stream=True,
+            temperature=temperature,
+            top_p=top_p,
+        ):
+            token = chunk.choices[0].delta.content
+            response += token
+            yield response
     except Exception as e:
-        print(f"An error occurred: {e}")
-        yield "I'm sorry, I encountered an error. Please try again."
+        print(f"An error occurred: {e}")  # It's a good practice add a try-except block
+        yield "I'm sorry, I encountered an error. Please try again."
 
 # --- Gradio Interface ---
 demo = gr.ChatInterface(
@@ -106,13 +156,19 @@ demo = gr.ChatInterface(
             value=default_nvc_prompt_template,
             label="System message",
             visible=True,
-            lines=10,
+            lines=10,  # Increased height for more space to read the prompt
         ),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
+        gr.Slider(
+            minimum=0.1,
+            maximum=1.0,
+            value=0.95,
+            step=0.05,
+            label="Top-p (nucleus sampling)",
+        ),
     ],
 )
 
 if __name__ == "__main__":
-    demo.launch(share=True)
+    demo.launch(share=True)
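
For reference, the context budget used by the updated respond() works out to MAX_CONTEXT_LENGTH - max_tokens - 100 tokens for the system prompt plus retained history. Below is a minimal, self-contained sketch of that arithmetic; the toy system prompt and history are invented, the 512-token value is the "Max new tokens" slider default from the diff, and the newest-first iteration order is assumed from the insert(0, ...) / break pattern shown in truncate_history().

```python
# Sketch (hypothetical values) of the token budget behind truncate_history() in app.py:
#   budget = MAX_CONTEXT_LENGTH - max_tokens - 100
from transformers import AutoTokenizer

MAX_CONTEXT_LENGTH = 4096   # constant from app.py
MAX_NEW_TOKENS = 512        # default of the "Max new tokens" slider
SAFETY_MARGIN = 100         # extra headroom reserved in respond()

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")

def count_tokens(text: str) -> int:
    return len(tokenizer.encode(text))

budget = MAX_CONTEXT_LENGTH - MAX_NEW_TOKENS - SAFETY_MARGIN  # 3484 tokens for system + history

system_prompt = "You are Roos, an NVC chatbot."  # toy stand-in for the real prompt
history = [("I feel ignored at work.",
            "Do you feel sadness because you need recognition?")] * 200  # invented turns

kept = []
used = count_tokens(system_prompt)
# Walk the history newest-first (assumed from the insert(0, ...) / break pattern in the diff)
for user_msg, assistant_msg in reversed(history):
    turn = count_tokens(user_msg) + count_tokens(assistant_msg)
    if used + turn > budget:
        break
    kept.insert(0, (user_msg, assistant_msg))
    used += turn

print(f"kept {len(kept)} of {len(history)} turns, {used}/{budget} tokens used")
```

With the full Roos system prompt the fixed share of the budget is far larger than this one-line stand-in, so proportionally fewer history turns survive truncation.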
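The updated respond() also wraps each message in Zephyr's special tokens (<|user|> ... </s>, <|assistant|> ... </s>) before handing the list to client.chat_completion(). A quick way to see the framing Zephyr's own chat template produces for a plain messages list is the sketch below; the messages are invented and the snippet only prints the rendered prompt.

```python
# Sketch: render an invented messages list with Zephyr's own chat template,
# to inspect the <|system|>/<|user|>/<|assistant|>/</s> framing the model expects.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")

messages = [
    {"role": "system", "content": "You are Roos, an NVC chatbot."},  # toy system prompt
    {"role": "user", "content": "I feel ignored at work."},          # invented user turn
]

prompt = tokenizer.apply_chat_template(
    messages,
    tokenize=False,              # return the formatted string instead of token ids
    add_generation_prompt=True,  # append the assistant header for the model's reply
)
print(prompt)
```

If the chat endpoint applies this template to each message's content as well, the manual markers added in this commit would appear as literal text inside the rendered prompt, so inspecting the output this way is a useful check when debugging.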