Reality123b committed on
Commit
ec72e3e
·
verified ·
1 Parent(s): 5a42cbd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +134 -14
app.py CHANGED
@@ -6,6 +6,9 @@ from huggingface_hub import InferenceClient
6
  from dataclasses import dataclass
7
  import pytesseract
8
  from PIL import Image
 
 
 
9
 
10
  @dataclass
11
  class ChatMessage:
@@ -30,20 +33,122 @@ class XylariaChat:
30
  self.image_api_headers = {"Authorization": f"Bearer {self.hf_token}"}
31
 
32
  self.conversation_history = []
33
- self.persistent_memory = {}
 
 
 
 
 
 
 
 
 
 
 
 
34
 
35
- self.system_prompt = """You are a helpful and harmless assistant. You are Xylaria developed by Sk Md Saad Amin . You should think step-by-step."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
 
37
  def store_information(self, key, value):
38
- self.persistent_memory[key] = value
 
 
 
39
  return f"Stored: {key} = {value}"
40
 
41
- def retrieve_information(self, key):
42
- return self.persistent_memory.get(key, "No information found for this key.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
 
44
  def reset_conversation(self):
45
  self.conversation_history = []
46
- self.persistent_memory.clear()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
 
48
  try:
49
  self.client = InferenceClient(
@@ -99,10 +204,9 @@ class XylariaChat:
99
  content=self.system_prompt
100
  ).to_dict())
101
 
102
- if self.persistent_memory:
103
- memory_context = "Remembered Information:\n" + "\n".join(
104
- [f"{k}: {v}" for k, v in self.persistent_memory.items()]
105
- )
106
  messages.append(ChatMessage(
107
  role="system",
108
  content=memory_context
@@ -151,7 +255,6 @@ class XylariaChat:
151
  prompt += f"<|assistant|>\n{msg['content']}<|end|>\n"
152
  prompt += "<|assistant|>\n"
153
  return prompt
154
-
155
 
156
  def create_interface(self):
157
  def streaming_response(message, chat_history, image_filepath, math_ocr_image_path):
@@ -194,6 +297,24 @@ class XylariaChat:
194
  yield "", updated_history, None, None
195
  return
196
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
197
  self.conversation_history.append(ChatMessage(role="user", content=message).to_dict())
198
  self.conversation_history.append(ChatMessage(role="assistant", content=full_response).to_dict())
199
 
@@ -249,7 +370,6 @@ class XylariaChat:
249
  transform: translateY(0);
250
  }
251
  }
252
-
253
  /* Accordion Styling and Animation */
254
  .gr-accordion-button {
255
  background-color: #f0f0f0 !important;
@@ -285,7 +405,7 @@ class XylariaChat:
285
  with gr.Blocks(theme='soft', css=custom_css) as demo:
286
  with gr.Column():
287
  chatbot = gr.Chatbot(
288
- label="Xylaria 1.5 Senoa (EXPERIMENTAL)",
289
  height=500,
290
  show_copy_button=True,
291
  )
@@ -358,4 +478,4 @@ def main():
358
  )
359
 
360
  if __name__ == "__main__":
361
- main()
 
6
  from dataclasses import dataclass
7
  import pytesseract
8
  from PIL import Image
9
+ from sentence_transformers import SentenceTransformer, util
10
+ import torch
11
+ import numpy as np
12
 
13
  @dataclass
14
  class ChatMessage:
 
33
  self.image_api_headers = {"Authorization": f"Bearer {self.hf_token}"}
34
 
35
  self.conversation_history = []
36
+ self.persistent_memory = []
37
+ self.memory_embeddings = None
38
+ self.embedding_model = SentenceTransformer('all-mpnet-base-v2')
39
+
40
+ self.internal_state = {
41
+ "emotions": {
42
+ "valence": 0.5,
43
+ "arousal": 0.5,
44
+ "dominance": 0.5,
45
+ },
46
+ "memory_load": 0.0,
47
+ "introspection_level": 0.0
48
+ }
49
 
50
+ self.goals = [
51
+ {"goal": "Provide helpful and informative responses", "priority": 0.8, "status": "active"},
52
+ {"goal": "Learn from interactions and improve conversational abilities", "priority": 0.9, "status": "active"},
53
+ {"goal": "Maintain a coherent and engaging conversation", "priority": 0.7, "status": "active"}
54
+ ]
55
+
56
+ self.system_prompt = """You are a helpful and harmless assistant. You are Xylaria developed by Sk Md Saad Amin. You should think step-by-step """
57
+
58
def update_internal_state(self, emotion_deltas, memory_load_delta, introspection_delta):
    """Shift the agent's internal state by the given deltas, clamped to [0, 1].

    Args:
        emotion_deltas: dict with optional 'valence'/'arousal'/'dominance'
            keys; a missing key means "no change" for that channel.
        memory_load_delta: additive change to the memory-load scalar.
        introspection_delta: additive change to the introspection level.
    """
    emotions = self.internal_state["emotions"]
    # Apply each VAD channel's delta in the same fixed order as before.
    for channel in ("valence", "arousal", "dominance"):
        emotions[channel] = np.clip(emotions[channel] + emotion_deltas.get(channel, 0), 0.0, 1.0)
    self.internal_state["memory_load"] = np.clip(
        self.internal_state["memory_load"] + memory_load_delta, 0.0, 1.0
    )
    self.internal_state["introspection_level"] = np.clip(
        self.internal_state["introspection_level"] + introspection_delta, 0.0, 1.0
    )
64
+
65
def introspect(self):
    """Return a human-readable snapshot of emotional state, load and goals."""
    lines = [
        "Introspection Report:\n",
        f" Current Emotional State (VAD): {self.internal_state['emotions']}\n",
        f" Memory Load: {self.internal_state['memory_load']:.2f}\n",
        f" Introspection Level: {self.internal_state['introspection_level']:.2f}\n",
        " Current Goals:\n",
    ]
    # One line per tracked goal, priority rendered to two decimals.
    lines.extend(
        f" - {goal['goal']} (Priority: {goal['priority']:.2f}, Status: {goal['status']})\n"
        for goal in self.goals
    )
    return "".join(lines)
74
+
75
def adjust_response_based_on_state(self, response):
    """Decorate *response* with introspection and mood cues from internal state.

    A full introspection report is prepended when introspection_level
    exceeds 0.7; a short mood sentence is prepended when valence is
    notably low (< 0.4) or high (> 0.6), with arousal picking the tone.
    """
    if self.internal_state["introspection_level"] > 0.7:
        response = self.introspect() + "\n\n" + response

    valence = self.internal_state["emotions"]["valence"]
    arousal = self.internal_state["emotions"]["arousal"]
    high_arousal = arousal > 0.6

    mood_prefix = ""
    if valence < 0.4:
        mood_prefix = (
            "I'm feeling a bit overwhelmed right now, but I'll do my best to assist you. "
            if high_arousal
            else "I'm not feeling my best at the moment, but I'll try to help. "
        )
    elif valence > 0.6:
        mood_prefix = (
            "I'm feeling quite energized and ready to assist! "
            if high_arousal
            else "I'm in a good mood and happy to help. "
        )
    return mood_prefix + response
94
+
95
def update_goals(self, user_feedback):
    """Nudge the 'helpful responses' goal priority based on feedback keywords.

    "helpful" in the feedback raises the priority by 0.1 (capped at 1.0);
    otherwise "confusing" lowers it by 0.1 (floored at 0.0). Other goals
    are untouched.
    """
    feedback = user_feedback.lower()
    target = "Provide helpful and informative responses"
    if "helpful" in feedback:
        for goal in self.goals:
            if goal["goal"] == target:
                goal["priority"] = min(goal["priority"] + 0.1, 1.0)
    elif "confusing" in feedback:
        for goal in self.goals:
            if goal["goal"] == target:
                goal["priority"] = max(goal["priority"] - 0.1, 0.0)
104
 
105
def store_information(self, key, value):
    """Append a "key: value" fact to persistent memory and refresh embeddings.

    Returns a confirmation string echoing what was stored.
    """
    entry = f"{key}: {value}"
    self.persistent_memory.append(entry)
    self.update_memory_embeddings()
    # Storing a new fact slightly raises the perceived memory load.
    self.update_internal_state({}, 0.1, 0)
    return f"Stored: {key} = {value}"
111
 
112
def retrieve_information(self, query):
    """Return up to the 3 stored memories most semantically similar to *query*.

    Returns the fallback string "No information found in memory." when
    persistent memory is empty; otherwise the matching memory strings
    joined with newlines.
    """
    if not self.persistent_memory:
        return "No information found in memory."

    query_embedding = self.embedding_model.encode(query, convert_to_tensor=True)

    # Lazily (re)build memory embeddings if they were never computed.
    if self.memory_embeddings is None:
        self.update_memory_embeddings()

    # Cosine similarity needs both tensors on the same device.
    if self.memory_embeddings.device != query_embedding.device:
        self.memory_embeddings = self.memory_embeddings.to(query_embedding.device)

    scores = util.pytorch_cos_sim(query_embedding, self.memory_embeddings)[0]
    k = min(3, len(self.persistent_memory))
    best = torch.topk(scores, k=k)

    hits = [self.persistent_memory[idx] for idx in best.indices]
    # Retrieval nudges the introspection level upward.
    self.update_internal_state({}, 0, 0.1)
    return "\n".join(hits)
130
+
131
def update_memory_embeddings(self):
    """Recompute the embedding tensor covering every stored memory string."""
    memories = self.persistent_memory
    self.memory_embeddings = self.embedding_model.encode(memories, convert_to_tensor=True)
133
 
134
  def reset_conversation(self):
135
  self.conversation_history = []
136
+ self.persistent_memory = []
137
+ self.memory_embeddings = None
138
+ self.internal_state = {
139
+ "emotions": {
140
+ "valence": 0.5,
141
+ "arousal": 0.5,
142
+ "dominance": 0.5,
143
+ },
144
+ "memory_load": 0.0,
145
+ "introspection_level": 0.0
146
+ }
147
+ self.goals = [
148
+ {"goal": "Provide helpful and informative responses", "priority": 0.8, "status": "active"},
149
+ {"goal": "Learn from interactions and improve conversational abilities", "priority": 0.9, "status": "active"},
150
+ {"goal": "Maintain a coherent and engaging conversation", "priority": 0.7, "status": "active"}
151
+ ]
152
 
153
  try:
154
  self.client = InferenceClient(
 
204
  content=self.system_prompt
205
  ).to_dict())
206
 
207
+ relevant_memory = self.retrieve_information(user_input)
208
+ if relevant_memory and relevant_memory != "No information found in memory.":
209
+ memory_context = "Remembered Information:\n" + relevant_memory
 
210
  messages.append(ChatMessage(
211
  role="system",
212
  content=memory_context
 
255
  prompt += f"<|assistant|>\n{msg['content']}<|end|>\n"
256
  prompt += "<|assistant|>\n"
257
  return prompt
 
258
 
259
  def create_interface(self):
260
  def streaming_response(message, chat_history, image_filepath, math_ocr_image_path):
 
297
  yield "", updated_history, None, None
298
  return
299
 
300
+ full_response = self.adjust_response_based_on_state(full_response)
301
+
302
+ self.update_goals(message)
303
+
304
+ if any(word in message.lower() for word in ["sad", "unhappy", "depressed", "down"]):
305
+ self.update_internal_state({"valence": -0.2, "arousal": 0.1}, 0, 0)
306
+ elif any(word in message.lower() for word in ["happy", "good", "great", "excited", "amazing"]):
307
+ self.update_internal_state({"valence": 0.2, "arousal": 0.2}, 0, 0)
308
+ elif any(word in message.lower() for word in ["angry", "mad", "furious", "frustrated"]):
309
+ self.update_internal_state({"valence": -0.3, "arousal": 0.3, "dominance": -0.2}, 0, 0)
310
+ elif any(word in message.lower() for word in ["scared", "afraid", "fearful", "anxious"]):
311
+ self.update_internal_state({"valence": -0.2, "arousal": 0.4, "dominance": -0.3}, 0, 0)
312
+ elif any(word in message.lower() for word in ["surprise", "amazed", "astonished"]):
313
+ self.update_internal_state({"valence": 0.1, "arousal": 0.5, "dominance": 0.1}, 0, 0)
314
+ else:
315
+ self.update_internal_state({"valence": 0.05, "arousal": 0.05}, 0, 0.1)
316
+
317
+
318
  self.conversation_history.append(ChatMessage(role="user", content=message).to_dict())
319
  self.conversation_history.append(ChatMessage(role="assistant", content=full_response).to_dict())
320
 
 
370
  transform: translateY(0);
371
  }
372
  }
 
373
  /* Accordion Styling and Animation */
374
  .gr-accordion-button {
375
  background-color: #f0f0f0 !important;
 
405
  with gr.Blocks(theme='soft', css=custom_css) as demo:
406
  with gr.Column():
407
  chatbot = gr.Chatbot(
408
+ label="Xylaria 1.5 Senoa",
409
  height=500,
410
  show_copy_button=True,
411
  )
 
478
  )
479
 
480
  if __name__ == "__main__":
481
+ main()