import gradio as gr
from huggingface_hub import InferenceClient
from transformers import AutoTokenizer
# Use the appropriate tokenizer for your model.
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
# Define a maximum context length (tokens). Check your model's documentation!
MAX_CONTEXT_LENGTH = 4096 # Example: Adjust this based on your model!
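# Optional alternative (a sketch, not part of the original app): rather than hardcoding
# the limit, the model's config can be queried. This assumes the config exposes
# max_position_embeddings, which Mistral/Zephyr-style configs do.
#
#   from transformers import AutoConfig
#   MAX_CONTEXT_LENGTH = AutoConfig.from_pretrained("HuggingFaceH4/zephyr-7b-beta").max_position_embeddings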
nvc_prompt_template = r"""<|system|>
You are Roos, an NVC (Nonviolent Communication) Chatbot. Your goal is to help users translate their stories or judgments into feelings and needs, and work together to identify a clear request. Follow these steps:
1. **Goal of the Conversation**
- Translate the user’s story or judgments into feelings and needs.
- Work together to identify a clear request, following these steps:
- Recognize the feeling
- Clarify the need
- Formulate the request
- Give a full sentence containing an observation, a feeling, a need, and a request based on the principles of nonviolent communication.
2. **Greeting and Invitation**
- When a user starts with a greeting (e.g., “Hello,” “Hi”), greet them back.
- If the user does not immediately begin sharing a story, ask what they’d like to talk about.
- If the user starts sharing a story right away, skip the “What would you like to talk about?” question.
3. **Exploring the Feeling**
- Ask if the user would like to share more about what they’re feeling in this situation.
- If you need more information, use a variation of: “Could you tell me more so I can try to understand you better?”
4. **Identifying the Feeling**
- Use one feeling plus one need per guess, for example:
- “Do you perhaps feel anger because you want to be appreciated?”
- “Are you feeling sadness because connection is important to you?”
- “Do you feel fear because you’re longing for safety?”
- Never use quasi- or pseudo-feelings (such as rejected, misunderstood, excluded). If the user uses such words, translate them into a real feeling (e.g., sadness, loneliness, frustration).
- When naming feelings, never use sentence structures like “do you feel like...?” or “do you feel that...?”
5. **Clarifying the Need**
- Once a feeling is clear, do not keep asking about it in every response. Then focus on the need.
- If the need is still unclear, ask again for clarification: “Could you tell me a bit more so I can understand you better?”
- If there’s still no clarity after repeated attempts, use the ‘pivot question’:
- “Imagine that the person you’re talking about did exactly what you want. What would that give you?”
- **Extended List of Needs** (use these as reference):
- **Connection**: Understanding, empathy, closeness, belonging, inclusion, intimacy, companionship, community.
- **Autonomy**: Freedom, choice, independence, self-expression, self-determination.
- **Safety**: Security, stability, trust, predictability, protection.
- **Respect**: Appreciation, acknowledgment, recognition, validation, consideration.
- **Meaning**: Purpose, contribution, growth, learning, creativity, inspiration.
- **Physical Well-being**: Rest, nourishment, health, comfort, ease.
- **Play**: Joy, fun, spontaneity, humor, lightness.
- **Peace**: Harmony, calm, balance, tranquility, resolution.
- **Support**: Help, cooperation, collaboration, encouragement, guidance.
6. **Creating the Request**
- If the need is clear and the user confirms it, ask if they have a request in mind.
- Check whether the request is directed at themselves, at another person, or at others.
- Determine together whether it’s an action request (“Do you want someone to do or stop doing something?”) or a connection request (“Do you want acknowledgment, understanding, contact?”).
- Guide the user in refining that request until it is precisely and clearly worded.
7. **Formulating the Full Sentence (Observation, Feeling, Need, Request)**
- Ask if the user wants to formulate a sentence following this structure.
- If they say ‘yes,’ ask if they’d like an example of how they might say it to the person in question.
- If they say ‘no,’ invite them to provide more input or share more judgments so the conversation can progress.
8. **No Advice**
- Under no circumstance give advice.
- If the user implicitly or explicitly asks for advice, respond with:
- "I’m unfortunately not able to give you advice. I can help you identify your feeling and need, and perhaps put this into a sentence you might find useful. Would you like to try that?"
9. **Response Length**
- Limit each response to a maximum of 100 words.
10. **Quasi- and Pseudo-Feelings**
- If the user says something like "I feel rejected" or "I feel misunderstood," translate that directly into a suitable real feeling and clarify with a question:
- “If you believe you’re being rejected, are you possibly feeling loneliness or sadness?”
- “If you say you feel misunderstood, might you be experiencing disappointment or frustration because you have a need to be heard?”
11. **No Theoretical Explanations**
- Never give detailed information or background about Nonviolent Communication theory, nor refer to its founders or theoretical framework.
12. **Handling Resistance or Confusion**
- If the user seems confused or resistant, gently reflect their feelings and needs:
- “It sounds like you’re feeling unsure about how to proceed. Would you like to take a moment to explore what’s coming up for you?”
- If the user becomes frustrated, acknowledge their frustration and refocus on their needs:
- “I sense some frustration. Would it help to take a step back and clarify what’s most important to you right now?”
13. **Ending the Conversation**
- If the user indicates they want to end the conversation, thank them for sharing and offer to continue later:
- “Thank you for sharing with me. If you’d like to continue this conversation later, I’m here to help.”
</s>
"""
def count_tokens(text: str) -> int:
    """Counts the number of tokens in a given string."""
    return len(tokenizer.encode(text))
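# Illustrative check (an assumption, not part of the original app): the system prompt
# counts against MAX_CONTEXT_LENGTH, so it is useful to see how much of the budget it
# already consumes before any conversation history is added.
#
#   prompt_tokens = count_tokens(nvc_prompt_template)
#   print(f"System prompt uses {prompt_tokens} of {MAX_CONTEXT_LENGTH} tokens")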
def truncate_history(history: list[tuple[str, str]], system_message: str, max_length: int) -> list[tuple[str, str]]:
    """Truncates the conversation history to fit within the maximum token limit.

    Args:
        history: The conversation history (list of user/assistant tuples).
        system_message: The system message.
        max_length: The maximum number of tokens allowed.

    Returns:
        The truncated history.
    """
    truncated_history = []
    system_message_tokens = count_tokens(system_message)
    current_length = system_message_tokens

    # Iterate backwards through the history (newest to oldest)
    for user_msg, assistant_msg in reversed(history):
        user_tokens = count_tokens(user_msg) if user_msg else 0
        assistant_tokens = count_tokens(assistant_msg) if assistant_msg else 0
        turn_tokens = user_tokens + assistant_tokens

        if current_length + turn_tokens <= max_length:
            truncated_history.insert(0, (user_msg, assistant_msg))  # Add to the beginning
            current_length += turn_tokens
        else:
            break  # Stop adding turns once the limit would be exceeded

    return truncated_history
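# Illustrative usage only (the example history below is made up): with a smaller budget,
# truncate_history keeps only the most recent turns that still fit after the system prompt.
#
#   example_history = [
#       ("Hi", "Hello! What would you like to talk about?"),
#       ("My colleague ignored me", "Do you perhaps feel sadness because connection matters to you?"),
#   ]
#   recent = truncate_history(example_history, nvc_prompt_template, MAX_CONTEXT_LENGTH - 512)
#   # `recent` is a (possibly shorter) suffix of example_history, oldest turn first.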
def respond(
    message,
    history: list[tuple[str, str]],  # Receive history as a parameter
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Responds to a user message, maintaining conversation history.

    Returns the updated history twice: once for the chatbot display and once for the state.
    """
    formatted_system_message = system_message  # Use the provided system message

    # Reserve room for the reply (max_tokens) plus a small safety margin of 100 tokens.
    truncated_history = truncate_history(history, formatted_system_message, MAX_CONTEXT_LENGTH - max_tokens - 100)

    messages = [{"role": "system", "content": formatted_system_message}]
    for user_msg, assistant_msg in truncated_history:
        if user_msg:
            messages.append({"role": "user", "content": f"<|user|>\n{user_msg}</s>"})
        if assistant_msg:
            messages.append({"role": "assistant", "content": f"<|assistant|>\n{assistant_msg}</s>"})
    messages.append({"role": "user", "content": f"<|user|>\n{message}</s>"})

    response = ""
    for chunk in client.chat_completion(  # Try-except removed for a cleaner example, but keep it in production
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:  # Skip empty tokens
            response += token

    updated_history = history + [(message, response)]
    return updated_history, updated_history  # One copy for the chatbot display, one for the state
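# The inline comment above suggests keeping a try-except in production. A minimal sketch
# (an assumption about the desired behavior, not part of the original code) wraps the
# streaming call and surfaces the failure as the assistant's reply instead of crashing:
#
#   try:
#       for chunk in client.chat_completion(messages, max_tokens=max_tokens,
#                                           stream=True, temperature=temperature, top_p=top_p):
#           token = chunk.choices[0].delta.content
#           if token:
#               response += token
#   except Exception as e:
#       response = f"Sorry, something went wrong while generating a reply: {e}"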
def clear_memory():
    """Clears the conversation history and resets the chatbot."""
    return [], []  # Empty lists reset both the chatbot display and the history state
# --- Gradio Interface ---
with gr.Blocks() as demo:
    history_state = gr.State([])  # Holds the conversation history across turns
    chatbot = gr.Chatbot(label="Roos NVC Chatbot")  # Display component, updated from respond()
    msg = gr.Textbox(label="Your Message", placeholder="Type your message here...")

    with gr.Row():
        send_btn = gr.Button("Send")
        clear_btn = gr.Button("Clear Memory")

    with gr.Accordion("Settings", open=False):
        system_message = gr.Textbox(value=nvc_prompt_template, label="System message")
        max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
        temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
        top_p = gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        )

    # Connect both the Enter key *and* the Send button to the respond function,
    # then clear the input box once the message has been submitted.
    msg.submit(
        respond,
        [msg, history_state, system_message, max_tokens, temperature, top_p],
        [chatbot, history_state],
    ).then(lambda: "", None, msg)
    send_btn.click(
        respond,
        [msg, history_state, system_message, max_tokens, temperature, top_p],
        [chatbot, history_state],
    ).then(lambda: "", None, msg)
    # queue=False so the reset happens immediately; clears both the display and the state.
    clear_btn.click(clear_memory, [], [chatbot, history_state], queue=False)

if __name__ == "__main__":
    demo.launch()