Raiff1982 committed on
Commit fab796d · verified · 1 Parent(s): e05b725

Update app.py

Files changed (1): app.py +0 -254
app.py CHANGED
@@ -1,254 +0,0 @@
- import os
- import json
- import logging
- from datetime import datetime
- from collections import defaultdict
- from typing import Dict, List, Any, Optional
- import numpy as np
- from sklearn.ensemble import IsolationForest
- import openai
- from dotenv import load_dotenv
-
- load_dotenv()
-
- logging.basicConfig(
-     level=logging.INFO,
-     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
-     handlers=[
-         logging.FileHandler("ai_system.log"),
-         logging.StreamHandler()
-     ]
- )
- logger = logging.getLogger(__name__)
-
- openai.api_key = os.getenv("OPENAI_API_KEY")
-
- class MemoryStore:
-     def __init__(self, persistence_file: str = "memory_store.json"):
-         self.session_memories = defaultdict(list)
-         self.persistent_memories = []
-         self.persistence_file = persistence_file
-         self.recall_weights = defaultdict(float)
-         self.sentiment_history = []
-         self.load_memory()
-
-     def add_memory(self, key: str, content: Any, domain: str, sentiment: float = 0.0):
-         """Store memories with contextual linking"""
-         memory = {
-             "content": content,
-             "timestamp": datetime.now().isoformat(),
-             "domain": domain,
-             "access_count": 0,
-             "sentiment": sentiment,
-             "associations": []
-         }
-
-         # Cross-domain linking
-         if self.persistent_memories:
-             last_memory = self.persistent_memories[-1]
-             memory["associations"] = self._find_associations(last_memory["content"], content)
-
-         self.session_memories[key].append(memory)
-         self.persistent_memories.append(memory)
-         self._update_recall_weight(key, boost=1.2)
-         self.prune_memories()
-
-     def recall(self, key: str, context: str = None) -> List[Any]:
-         """Context-aware recall with adaptive weights"""
-         memories = [m for m in self.persistent_memories if key in m["content"]]
-
-         if context:
-             memories = self._contextual_filter(memories, context)
-
-         # Apply temporal decay and frequency weights, then rank by that score
-         weights = [
-             self.recall_weights[key] *
-             (1 / (1 + self._days_since(m["timestamp"]))) *
-             (1 + m["access_count"] * 0.1)
-             for m in memories
-         ]
-
-         ranked = sorted(zip(memories, weights), key=lambda pair: pair[1], reverse=True)
-         return [m for m, _ in ranked[:10]]
-
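# The recall score above is multiplicative: a per-key weight, temporal decay
# of 1 / (1 + age_days), and a 10%-per-access frequency bonus. A minimal
# standalone sketch of that formula (names and values are illustrative, not
# part of the app):
from datetime import datetime

def recall_score(key_weight: float, timestamp: str, access_count: int) -> float:
    age_days = (datetime.now() - datetime.fromisoformat(timestamp)).days
    return key_weight * (1 / (1 + age_days)) * (1 + access_count * 0.1)

# A week-old memory accessed 3 times at key weight 1.2 scores
# 1.2 * (1 / 8) * 1.3 = 0.195.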
-     def _find_associations(self, existing: str, new: str) -> List[str]:
-         """Semantic linking between concepts"""
-         # Placeholder for actual semantic similarity model
-         return list(set(existing.split()) & set(new.split()))
-
-     def _contextual_filter(self, memories: List[dict], context: str) -> List[dict]:
-         """Filter memories based on contextual relevance"""
-         # Placeholder for actual contextual similarity model
-         return [m for m in memories if context.lower() in m["content"].lower()]
-
-     def _days_since(self, timestamp: str) -> float:
-         return (datetime.now() - datetime.fromisoformat(timestamp)).days
-
-     def _update_recall_weight(self, key: str, boost: float = 1.0):
-         # Start unseen keys at 1.0 so the multiplicative boost can take
-         # effect (defaultdict(float) yields 0.0, which a boost cannot grow),
-         # and cap the weight at 5.0
-         self.recall_weights[key] = min((self.recall_weights[key] or 1.0) * boost, 5.0)
-
-     def prune_memories(self):
-         """Modular pruning system with anomaly detection"""
-         # Remove less relevant memories using isolation forest
-         if len(self.persistent_memories) > 1000:
-             X = np.array([len(m["content"]) for m in self.persistent_memories]).reshape(-1, 1)
-             clf = IsolationForest(contamination=0.1)
-             preds = clf.fit_predict(X)
-             self.persistent_memories = [m for m, p in zip(self.persistent_memories, preds) if p == 1]
-
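# IsolationForest.fit_predict labels inliers 1 and outliers -1, so the pruning
# above keeps roughly 90% of memories (contamination=0.1) once the store
# exceeds 1000 entries, with content length as the only feature. A
# self-contained sketch of the same mechanism on toy data:
import numpy as np
from sklearn.ensemble import IsolationForest

lengths = np.array([12, 15, 14, 13, 11, 400]).reshape(-1, 1)  # one obvious outlier
preds = IsolationForest(contamination=0.15, random_state=0).fit_predict(lengths)
kept = [int(v) for v, p in zip(lengths.ravel(), preds) if p == 1]  # 400 should be dropped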
-     def save_memory(self):
-         with open(self.persistence_file, "w") as f:
-             json.dump({
-                 "persistent": self.persistent_memories,
-                 "weights": self.recall_weights
-             }, f)
-
-     def load_memory(self):
-         try:
-             with open(self.persistence_file, "r") as f:
-                 data = json.load(f)
-                 self.persistent_memories = data.get("persistent", [])
-                 self.recall_weights = defaultdict(float, data.get("weights", {}))
-         except FileNotFoundError:
-             pass
-
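# A minimal usage sketch of MemoryStore on its own (hypothetical inputs and
# file name; assumes the class above is importable):
store = MemoryStore(persistence_file="demo_store.json")
store.add_memory("user123", "how do I build a parser", domain="technical")
hits = store.recall("build")   # recall matches `key` as a substring of content
store.save_memory()            # persists memories and weights to JSON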
- class SentientGPT:
-     def __init__(self):
-         self.memory = MemoryStore()
-         self.session_context = defaultdict(dict)
-         self.sentiment_window = []
-         self.engagement_history = []
-
-     def _track_engagement(self, response: str):
-         """Track user engagement patterns"""
-         engagement = {
-             "timestamp": datetime.now(),
-             "response_length": len(response),
-             "complexity": self._calculate_complexity(response)
-         }
-         self.engagement_history.append(engagement)
-
-         if len(self.engagement_history) > 100:
-             self.engagement_history.pop(0)
-
-     def _calculate_complexity(self, text: str) -> float:
-         """Calculate text complexity score"""
-         words = text.split()
-         unique_words = len(set(words))
-         return (unique_words / len(words)) if words else 0
-
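# The complexity score is the type-token ratio (unique words / total words), e.g.:
ratio = len(set("the cat sat on the mat".split())) / 6   # 5 unique / 6 -> 0.833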
-     def process_query(self, user_id: str, query: str) -> str:
-         """Main processing pipeline"""
-         # Analyze sentiment
-         sentiment = self._analyze_sentiment(query)
-         self.sentiment_window.append(sentiment)
-
-         # Update context
-         context = self._update_context(user_id, query, sentiment)
-
-         # Generate response
-         response = self._generate_response(query, context, sentiment)
-
-         # Memory operations
-         self.memory.add_memory(
-             key=user_id,
-             content=query,
-             domain=self._detect_domain(query),
-             sentiment=sentiment
-         )
-
-         # Track engagement
-         self._track_engagement(response)
-
-         return response
-
-     def _analyze_sentiment(self, text: str) -> float:
-         """Dynamic sentiment analysis with moving window"""
-         # Placeholder for actual sentiment analysis
-         positive_words = {"good", "great", "happy", "awesome"}
-         negative_words = {"bad", "terrible", "hate", "awful"}
-         words = text.lower().split()
-         if not words:
-             return 0.0  # guard against division by zero on empty input
-         score = (sum(1 for w in words if w in positive_words) -
-                  sum(1 for w in words if w in negative_words)) / len(words)
-
-         # Apply moving window smoothing
-         if self.sentiment_window:
-             score = 0.7 * score + 0.3 * np.mean(self.sentiment_window[-5:])
-
-         return max(min(score, 1.0), -1.0)
-
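# The smoothing above is a fixed 70/30 blend of the new score with the mean
# of the last five scores, clamped to [-1, 1]. Worked through on sample values:
import numpy as np

window = [0.2, 0.4, -0.1]   # recent sentiment scores
raw = 1.0                   # a strongly positive new message
smoothed = max(min(0.7 * raw + 0.3 * np.mean(window[-5:]), 1.0), -1.0)
# np.mean(window) is about 0.167, so smoothed = 0.7 + 0.05 = 0.75: spikes are damped.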
-     def _detect_domain(self, query: str) -> str:
-         """Cross-domain detection"""
-         domains = {
-             "technical": {"how", "build", "code", "create"},
-             "emotional": {"feel", "think", "believe", "opinion"},
-             "factual": {"what", "when", "where", "why"}
-         }
-
-         words = set(query.lower().split())
-         scores = {
-             domain: len(words & keywords)
-             for domain, keywords in domains.items()
-         }
-
-         return max(scores, key=scores.get)
-
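# Domain detection is plain keyword overlap: the domain whose keyword set
# shares the most words with the query wins (ties fall to the first domain
# in insertion order). For example:
words = {"how", "do", "i", "build", "a", "parser"}
technical = len(words & {"how", "build", "code", "create"})       # 2
emotional = len(words & {"feel", "think", "believe", "opinion"})  # 0
factual = len(words & {"what", "when", "where", "why"})           # 0
# "technical" wins with a score of 2.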
-     def _update_context(self, user_id: str, query: str, sentiment: float) -> dict:
-         """Maintain dynamic conversation context"""
-         context = self.session_context[user_id]
-
-         # Maintain last 5 interactions
-         context.setdefault("history", []).append(query)
-         if len(context["history"]) > 5:
-             context["history"].pop(0)
-
-         # Track sentiment trends (exponential moving average)
-         context["sentiment"] = 0.8 * context.get("sentiment", 0) + 0.2 * sentiment
-
-         return context
-
-     def _generate_response(self, query: str, context: dict, sentiment: float) -> str:
-         """Generate response with contextual awareness"""
-         # Retrieve relevant memories
-         memories = self.memory.recall(
-             key=self._detect_domain(query),
-             context=query
-         )
-
-         # Build prompt with context
-         prompt = f"Context: {context}\nMemories: {memories[:3]}\nQuery: {query}"
-
-         try:
-             # Pre-1.0 OpenAI SDK interface (openai<=0.28)
-             response = openai.ChatCompletion.create(
-                 model="gpt-3.5-turbo",
-                 messages=[
-                     {"role": "system", "content": prompt},
-                     {"role": "user", "content": query}
-                 ]
-             ).choices[0].message['content']
-
-             # Adjust response based on sentiment
-             if sentiment < -0.5:
-                 response = f"I understand this might be frustrating. {response}"
-             elif sentiment > 0.5:
-                 response = f"Great to hear! {response}"
-
-         except Exception as e:
-             logger.error(f"API Error: {e}")
-             response = "I'm having trouble processing that request right now."
-
-         return response
-
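# openai.ChatCompletion.create is the pre-1.0 SDK interface and raises on
# openai>=1.0. A rough sketch of the equivalent call under the 1.0+ client
# (model name kept from the code above; message content is illustrative):
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment
reply = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "hello"}],
).choices[0].message.content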
- # ====================
- # Usage Example
- # ====================
- if __name__ == "__main__":
-     bot = SentientGPT()
-
-     while True:
-         query = input("User: ")
-         if query.lower() in ["exit", "quit"]:
-             break
-
-         response = bot.process_query("user123", query)
-         print(f"AI: {response}")
-     bot.memory.save_memory()
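# To try the deleted script as-is (assumptions: Python 3.8+, the pre-1.0
# OpenAI SDK, and a valid API key in .env):
#   pip install openai==0.28 python-dotenv numpy scikit-learn
#   echo "OPENAI_API_KEY=sk-..." > .env
#   python app.py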