Leonydis137 committed
Commit 8680b8f · verified · 1 Parent(s): 71414fa

Update app.py

Files changed (1):
  1. app.py +909 -160
app.py CHANGED
@@ -1,177 +1,926 @@
1
- import gradio as gr
2
- from memory_manager import embed_and_store, retrieve_relevant
3
- from agent_engine import step_turn
4
- from analysis_tools import analyze_sentiment_topics, plot_participation, generate_knowledge_graph
5
- from exporter import export_txt, export_json, export_pdf, send_webhook
6
 
7
- # Default agent parameters
8
- DEFAULT_PARAMS = {
9
- agent: {"creativity": 0.7, "criticality": 0.7}
10
- for agent in ["Initiator", "Responder", "Guardian", "Provocateur", "Cultural", "Judge"]
11
- }
 
12
 
13
- # Agent color mapping
14
- COLOR_MAP = {
15
- "Initiator": "#e6f7ff",
16
- "Responder": "#f6ffed",
17
- "Guardian": "#fff7e6",
18
- "Provocateur": "#f9e6ff",
19
- "Cultural": "#e6ffed",
20
- "Judge": "#f0f0f0",
21
- "System": "#d9d9d9",
22
- "User": "#ffffff"
23
- }
24
 
25
 
26
- def main():
27
- with gr.Blocks(css="""
28
- .convo-container {
29
- max-height: 400px;
30
- overflow-y: auto;
31
- padding: 10px;
32
- border: 1px solid #ccc;
33
- border-radius: 8px;
34
- background: #fafafa;
35
- }
36
- .message-card {
37
- padding: 8px;
38
- margin-bottom: 6px;
39
- border-radius: 6px;
40
- }
41
- .agent-panel {
42
- border: 1px solid #ddd;
43
- padding: 6px;
44
- border-radius: 4px;
45
- margin: 4px;
46
- }
47
- """, title="Hexa-Agent Discussion System") as demo:
48
- # States
49
- conversation_state = gr.State([])
50
- turn_state = gr.State(0)
51
- topic_state = gr.State("")
52
- params_state = gr.State(DEFAULT_PARAMS)
53
 
54
- # Header
55
- gr.Markdown("# 🧠 Modular Multi-Agent Discussion Platform")
56
 
57
- # Controls
58
  with gr.Row():
59
- topic_input = gr.Textbox(label="Topic", placeholder="Enter topic...", value="Ethical AI")
60
- set_topic_btn = gr.Button("Set Topic")
61
- clear_btn = gr.Button("Clear Discussion")
62
  with gr.Row():
63
- step_btn = gr.Button("▶️ Next Turn")
64
- gr.Markdown("---")
65
-
66
- # Conversation display
67
- convo_display = gr.HTML("<div class='convo-container' id='convo'></div>", label="Conversation")
68
-
69
- # Agent Panels
70
- with gr.Accordion("Agent Panels", open=False):
71
- initiator_panel = gr.Textbox(label="Initiator Latest", interactive=False)
72
- responder_panel = gr.Textbox(label="Responder Latest", interactive=False)
73
- guardian_panel = gr.Textbox(label="Guardian Latest", interactive=False)
74
- provocateur_panel = gr.Textbox(label="Provocateur Latest", interactive=False)
75
- cultural_panel = gr.Textbox(label="Cultural Latest", interactive=False)
76
- judge_panel = gr.Textbox(label="Judge Latest", interactive=False)
77
-
78
- # Analysis Tab
79
- with gr.Tab("Analysis"):
80
- sentiment = gr.Textbox(label="Sentiment")
81
- topics = gr.Textbox(label="Key Topics")
82
- part_plot = gr.Image(label="Participation Chart")
83
- graph_plot = gr.Image(label="Knowledge Graph")
84
- analyze_btn = gr.Button("Run Analysis")
85
- graph_btn = gr.Button("Generate Graph")
86
-
87
- # Configuration Tab: Parameter Sliders
88
- with gr.Tab("Configuration"):
89
- sliders = {}
90
- for agent in ["Initiator", "Responder", "Guardian", "Provocateur", "Cultural", "Judge"]:
91
  with gr.Row():
92
- sliders[f"{agent}_creativity"] = gr.Slider(0.0, 1.0, value=DEFAULT_PARAMS[agent]['creativity'], label=f"{agent} Creativity")
93
- sliders[f"{agent}_criticality"] = gr.Slider(0.0, 1.0, value=DEFAULT_PARAMS[agent]['criticality'], label=f"{agent} Criticality")
94
-
95
- # Export Tab
96
- with gr.Tab("Export"):
97
- fmt = gr.Radio(choices=["txt","json","pdf"], label="Format", value="txt")
98
- export_btn = gr.Button("Export")
99
- export_out = gr.File(label="Download")
100
- webhook_url = gr.Textbox(label="Webhook URL")
101
- send_btn = gr.Button("Send to Webhook")
102
- send_status = gr.Textbox(label="Status")
103
-
104
- # Event handlers
105
- set_topic_btn.click(lambda t: ([], 0, t), inputs=[topic_input], outputs=[convo_display, turn_state, topic_state])
106
- clear_btn.click(lambda: ([], 0, ""), outputs=[convo_display, turn_state, topic_state])
107
-
108
- def on_step(convo, turn, topic, params, *slider_vals):
109
- # Update params from sliders
110
- agents = ["Initiator","Responder","Guardian","Provocateur","Cultural","Judge"]
111
- new_params = {}
112
- idx = 0
113
- for agent in agents:
114
- new_params[agent] = {
115
- 'creativity': slider_vals[idx],
116
- 'criticality': slider_vals[idx+1]
117
- }
118
- idx += 2
119
- params = new_params
120
- if turn == 0 and topic:
121
- convo = [{"agent":"System","text":f"Topic: {topic}"}]
122
- convo = step_turn(convo, turn, topic or topic_input.value, params)
123
- # Build HTML
124
- html = ''
125
- for msg in convo:
126
- color = COLOR_MAP.get(msg['agent'], '#ffffff')
127
- html += f"<div class='message-card' style='background:{color};'><b>{msg['agent']}:</b> {msg['text']}</div>"
128
- html += "<script>var c=document.getElementById('convo'); c.scrollTop=c.scrollHeight;</script>"
129
- # Update panels
130
- panels = []
131
- for agent in agents:
132
- panels.append(next((m['text'] for m in reversed(convo) if m['agent']==agent), ''))
133
- return (html, convo, turn+1) + tuple(panels) + (params,)
134
-
135
- # Connect step with sliders
136
- step_btn.click(
137
- on_step,
138
- inputs=[conversation_state, turn_state, topic_state, params_state] + list(sliders.values()),
139
- outputs=[convo_display, conversation_state, turn_state,
140
- initiator_panel, responder_panel, guardian_panel,
141
- provocateur_panel, cultural_panel, judge_panel,
142
- params_state]
143
- )
144
 
145
- analyze_btn.click(
146
- lambda convo: (
147
- analyze_sentiment_topics(convo)['sentiment'],
148
- ", ".join(analyze_sentiment_topics(convo)['topics']),
149
- plot_participation(convo, 'participation.png')
150
- ),
151
- inputs=[conversation_state],
152
- outputs=[sentiment, topics, part_plot]
153
- )
154
 
155
- graph_btn.click(
156
- lambda convo: generate_knowledge_graph(convo, 'graph.png'),
157
- inputs=[conversation_state],
158
- outputs=[graph_plot]
159
- )
160
 
161
- export_btn.click(
162
- lambda fmt, convo, topic, turn: {
163
- 'txt': export_txt(convo, topic, turn),
164
- 'json': export_json(convo, topic, turn),
165
- 'pdf': export_pdf(convo, topic, turn)
166
- }[fmt],
167
- inputs=[fmt, conversation_state, topic_state, turn_state],
168
- outputs=[export_out]
169
  )
170
 
171
- send_btn.click(send_webhook, inputs=[webhook_url, conversation_state, topic_state, turn_state], outputs=[send_status])
172
 
173
- demo.launch()
174
 
 
175
 
176
- if __name__ == '__main__':
177
- main()
 
 
1
+ import os
2
+ import zipfile
3
+ import base64
 
 
4
 
5
+ def create_autonomous_ai_package():
6
+ # Create project directory structure
7
+ os.makedirs("Autonomous-AI-System", exist_ok=True)
8
+
9
+ # Define all files with their content
10
+ files = {
11
+ "app.py": """import gradio as gr
12
+ import uuid
13
+ import json
14
+ import logging
15
+ import os
16
+ import datetime
17
+ from memory import MemoryManager
18
+ from planner import Planner
19
+ from executor import Executor
20
+ from critic import Critic
21
+ from cognitive_engine import CognitiveEngine
22
+ from web_searcher import WebSearcher
23
+ from hf_packager import HFSpacePackager
24
 
25
+ # Initialize components
26
+ memory = MemoryManager()
27
+ planner = Planner()
28
+ executor = Executor()
29
+ critic = Critic()
30
+ cog_engine = CognitiveEngine()
31
+ web_searcher = WebSearcher()
32
+ packager = HFSpacePackager()
33
 
34
+ # Set up logging
35
+ logging.basicConfig(filename='log.txt', level=logging.INFO,
36
+ format='%(asctime)s - %(levelname)s - %(message)s')
37
 
38
+ class AutonomousAgent:
39
+ def __init__(self):
40
+ self.state_file = "state.json"
41
+ self.load_state()
42
+
43
+ def load_state(self):
44
+ try:
45
+ with open(self.state_file, 'r') as f:
46
+ self.state = json.load(f)
47
+ except (FileNotFoundError, json.JSONDecodeError):
48
+ self.state = {"sessions": {}}
49
+ self.save_state()
50
+
51
+ def save_state(self):
52
+ with open(self.state_file, 'w') as f:
53
+ json.dump(self.state, f, indent=2)
54
+
55
+ def process_goal(self, goal, session_id=None):
56
+ try:
57
+ if not session_id:
58
+ session_id = str(uuid.uuid4())
59
+ self.state["sessions"][session_id] = {"goal": goal, "status": "processing"}
60
+ memory.init_session(session_id)
61
+ self.save_state()
62
+
63
+ # Add to memory
64
+ memory.add(session_id, "user_goal", goal)
65
+
66
+ # Plan the task
67
+ plan = planner.plan_task(goal, memory.get(session_id))
68
+ memory.add(session_id, "plan", plan)
69
+
70
+ # Execute plan
71
+ results = []
72
+ for step in plan:
73
+ if "research" in step.lower() or "search" in step.lower():
74
+ # Perform web search
75
+ search_query = step.split(":")[1].strip() if ":" in step else goal
76
+ search_results = web_searcher.search(search_query)
77
+ memory.add(session_id, f"search:{search_query}", search_results)
78
+ results.append(f"🔍 Search results for '{search_query}':\\n{search_results[:500]}...")
79
+
80
+ elif "develop" in step.lower() or "code" in step.lower():
81
+ # Generate and execute code
82
+ code = cog_engine.generate_code(step, memory.get(session_id))
83
+ execution_result = executor.execute_code(code)
84
+ memory.add(session_id, "generated_code", code)
85
+ memory.add(session_id, "execution_result", execution_result)
86
+
87
+ # Review and improve
88
+ review = critic.review(step, execution_result)
89
+ memory.add(session_id, "review", review)
90
+
91
+ if "error" in review.lower() or "improve" in review.lower():
92
+ enhanced_code = cog_engine.improve_code(code, review)
93
+ memory.add(session_id, "enhanced_code", enhanced_code)
94
+ execution_result = executor.execute_code(enhanced_code)
95
+ results.append(f"🛠️ Enhanced code execution:\\n{execution_result}")
96
+ else:
97
+ results.append(f"✅ Code executed successfully:\\n{execution_result}")
98
+
99
+ elif "diagnose" in step.lower() or "check" in step.lower():
100
+ # Self-diagnostic
101
+ issues = cog_engine.identify_improvements(step)
102
+ memory.add(session_id, "diagnosis", issues)
103
+
104
+ if issues:
105
+ fixes = cog_engine.generate_enhancements(issues)
106
+ cog_engine.apply_enhancements(fixes)
107
+ results.append(f"⚙️ System repaired: {', '.join(issues)}")
108
+ else:
109
+ results.append("✅ System health check passed")
110
+
111
+ self.state["sessions"][session_id]["status"] = "completed"
112
+ self.save_state()
113
+ snapshot_url = packager.create_snapshot({
114
+ "session_id": session_id,
115
+ "memory": memory.get(session_id),
116
+ "results": results
117
+ })
118
+ return "\\n\\n".join(results), session_id, snapshot_url
119
+
120
+ except Exception as e:
121
+ logging.error(f"Error processing goal: {str(e)}", exc_info=True)
122
+ # Attempt self-repair
123
+ issues = [f"Runtime error: {str(e)}"]
124
+ fixes = cog_engine.generate_enhancements(issues)
125
+ cog_engine.apply_enhancements(fixes)
126
+ return f"⚠️ Error occurred. Self-repair initiated: {str(e)}", session_id, ""
127
 
128
+ # Initialize the agent
129
+ agent = AutonomousAgent()
130
 
131
+ # Create the Gradio interface
132
+ with gr.Blocks(css="style.css", title="Autonomous AI") as demo:
133
+ session_id = gr.State()
134
+ learning_session = gr.State({"history": []})
135
+
136
+ gr.Markdown("# 🤖 Autonomous AI System")
137
+
138
+ with gr.Tab("Task Execution"):
139
+ gr.Markdown("## 🚀 Execute Autonomous Tasks")
140
+ gr.Markdown("Enter a goal and the AI will research, plan, code, and self-improve to accomplish it.")
141
+
142
  with gr.Row():
143
+ goal_input = gr.Textbox(label="Your Goal", placeholder="Enter what you want to achieve...")
144
+ submit_btn = gr.Button("Execute Goal", variant="primary")
145
+
146
+ output = gr.Textbox(label="Execution Results", interactive=False, lines=10)
147
+ session_display = gr.Textbox(label="Session ID", interactive=False)
148
+ snapshot = gr.Textbox(label="Snapshot URL", interactive=False)
149
+
150
+ submit_btn.click(
151
+ fn=agent.process_goal,
152
+ inputs=[goal_input, session_id],
153
+ outputs=[output, session_display, snapshot]
154
+ )
155
+
156
+ with gr.Tab("Learning Coach"):
157
+ gr.Markdown("## 🎓 Personalized Learning Assistant")
158
+ gr.Markdown("Ask about any topic and I'll provide curated knowledge from reliable sources!")
159
+
160
  with gr.Row():
161
+ with gr.Column(scale=3):
162
+ learning_chat = gr.Chatbot(label="Learning Conversation", height=400)
163
+ learning_input = gr.Textbox(label="Your Question", placeholder="Ask about any topic...")
164
+
165
  with gr.Row():
166
+ submit_question = gr.Button("Ask", variant="primary")
167
+ clear_chat = gr.Button("Clear Conversation")
168
+
169
+ with gr.Column(scale=2):
170
+ knowledge_display = gr.Markdown("## Knowledge Resources\\n\\n*Your learning resources will appear here*")
171
+ level_buttons = gr.Radio(
172
+ ["Beginner", "Intermediate", "Advanced"],
173
+ label="Knowledge Level",
174
+ value="Beginner"
175
+ )
176
+ feedback = gr.Textbox(label="Feedback", placeholder="Too basic/advanced? Want more depth?")
177
+ submit_feedback = gr.Button("Submit Feedback")
178
+
179
+ # Learning Coach event handlers
180
+ def ask_question(question, history, session):
181
+ if not question.strip():
182
+ return history, session, "Please enter a question"
183
+
184
+ # Get response from cognitive engine
185
+ response = cog_engine.learning_coach_response(question)
186
+
187
+ # Update history
188
+ new_entry = (question, response)
189
+ history.append(new_entry)
190
+
191
+ # Get knowledge display
192
+ knowledge_html = response if response.startswith("##") else "## 📚 Knowledge Resources\\n\\n" + response
193
+
194
+ return history, session, knowledge_html
195
+
196
+ def update_learning_level(level, session):
197
+ cog_engine.learning_context["level"] = level.lower()
198
+ return session
199
+
200
+ def process_feedback(feedback_text, session):
201
+ if feedback_text.strip():
202
+ cog_engine.update_learning_context(feedback_text)
203
+ return "Feedback received! I'll adjust my teaching approach.", session
204
+
205
+ def clear_conversation(session):
206
+ cog_engine.conversation_history = []
207
+ cog_engine.learning_context = {"level": "beginner"}
208
+ return [], session, "## Knowledge Resources\\n\\n*Conversation cleared*"
209
+
210
+ # Connect components
211
+ submit_question.click(
212
+ ask_question,
213
+ [learning_input, learning_chat, learning_session],
214
+ [learning_chat, learning_session, knowledge_display]
215
+ )
216
+
217
+ level_buttons.change(
218
+ update_learning_level,
219
+ [level_buttons, learning_session],
220
+ [learning_session]
221
+ )
222
+
223
+ submit_feedback.click(
224
+ process_feedback,
225
+ [feedback, learning_session],
226
+ [feedback, learning_session]
227
+ )
228
+
229
+ clear_chat.click(
230
+ clear_conversation,
231
+ [learning_session],
232
+ [learning_chat, learning_session, knowledge_display]
233
+ )
234
+
235
+ # Allow submitting question with Enter key
236
+ learning_input.submit(
237
+ ask_question,
238
+ [learning_input, learning_chat, learning_session],
239
+ [learning_chat, learning_session, knowledge_display]
240
+ )
241
 
242
+ if __name__ == "__main__":
243
+ demo.launch()
244
+ """,
245
+
246
+ "knowledge_integrator.py": """import requests
247
+ import json
248
+ import feedparser
249
+ from datetime import datetime
250
+ import re
251
 
252
+ class KnowledgeIntegrator:
253
+ SOURCES = {
254
+ "openstax": {
255
+ "endpoint": "https://openstax.org/api/v2/pages",
256
+ "params": {"type": "textbook", "fields": "title,description,url"},
257
+ "parser": lambda data: [{
258
+ "title": item["title"],
259
+ "description": item["description"],
260
+ "url": item["url"]
261
+ } for item in data["items"]] if "items" in data else []
262
+ },
263
+ "arxiv": {
264
+ "endpoint": "http://export.arxiv.org/api/query",
265
+ "params": {"search_query": "", "start": 0, "max_results": 3},
266
+ "parser": lambda data: [{
267
+ "title": entry.title,
268
+ "summary": entry.summary,
269
+ "published": entry.published,
270
+ "url": entry.link
271
+ } for entry in data.entries] if hasattr(data, 'entries') else []
272
+ },
273
+ "wikimedia": {
274
+ "endpoint": "https://en.wikipedia.org/api/rest_v1/page/summary/",
275
+ "parser": lambda data: {
276
+ "summary": data["extract"],
277
+ "url": data["content_urls"]["desktop"]["page"]
278
+ } if "extract" in data else {}
279
+ },
280
+ "khanacademy": {
281
+ "endpoint": "https://www.khanacademy.org/api/v1/topic/",
282
+ "parser": lambda data: [{
283
+ "title": item["title"],
284
+ "description": item["description"],
285
+ "url": f"https://www.khanacademy.org{item['url']}"
286
+ } for item in data.get("children", []) if item["kind"] == "Video"]
287
+ }
288
+ }
289
+
290
+ def __init__(self):
291
+ self.query_history = []
292
+
293
+ def retrieve_knowledge(self, query, source_name):
294
+ source = self.SOURCES.get(source_name.lower())
295
+ if not source:
296
+ return {"error": "Invalid knowledge source"}
297
+
298
+ try:
299
+ # Special handling for different API formats
300
+ if source_name == "wikimedia":
301
+ formatted_query = query.replace(" ", "_")
302
+ response = requests.get(source["endpoint"] + formatted_query)
303
+ elif source_name == "khanacademy":
304
+ response = requests.get(source["endpoint"] + query.lower())
305
+ else:
306
+ params = source["params"].copy()
307
+ params["search_query"] = query
308
+ response = requests.get(source["endpoint"], params=params)
309
+
310
+ response.raise_for_status()
311
+
312
+ # Parse based on content type
313
+ if 'json' in response.headers.get('Content-Type', ''):
314
+ data = response.json()
315
+ else:
316
+ # arXiv returns XML
317
+ data = feedparser.parse(response.text)
318
+
319
+ return source["parser"](data)
320
+ except requests.exceptions.RequestException as e:
321
+ return {"error": f"Network error: {str(e)}"}
322
+ except Exception as e:
323
+ return {"error": f"Processing error: {str(e)}"}
324
+
325
+ def integrate_in_teaching(self, concept, age_group="beginner", max_results=3):
326
+ \"\"\"Augment explanations with verified knowledge\"\"\"
327
+ # Get foundational knowledge
328
+ openstax_results = self.retrieve_knowledge(concept, "openstax")
329
+ khan_results = self.retrieve_knowledge(concept, "khanacademy")
330
+
331
+ # Get current research
332
+ arxiv_results = self.retrieve_knowledge(concept, "arxiv")
333
+
334
+ # Get quick reference
335
+ wiki_results = self.retrieve_knowledge(concept, "wikimedia")
336
+
337
+ # Filter results based on age group
338
+ if age_group == "beginner":
339
+ arxiv_results = [] # Skip research for beginners
340
+
341
+ # Format results
342
+ return {
343
+ "concept": concept,
344
+ "foundational": self._filter_results(openstax_results + khan_results, max_results),
345
+ "research": self._filter_results(arxiv_results, max_results),
346
+ "summary": wiki_results.get("summary", f"No summary available for {concept}"),
347
+ "summary_url": wiki_results.get("url", "")
348
+ }
349
+
350
+ def _filter_results(self, results, max_results):
351
+ \"\"\"Filter out error responses and limit results\"\"\"
352
+ if isinstance(results, list):
353
+ return [item for item in results if not isinstance(item, dict) or "error" not in item][:max_results]
354
+ return results
355
+
356
+ def format_for_display(self, knowledge_data):
357
+ \"\"\"Create user-friendly presentation of knowledge\"\"\"
358
+ if "error" in knowledge_data:
359
+ return knowledge_data["error"]
360
+
361
+ response = f"## 📚 Learning Resources: {knowledge_data['concept']}\\n\\n"
362
+
363
+ # Summary section
364
+ summary = knowledge_data['summary']
365
+ # Simplify complex sentences for beginners
366
+ if len(summary.split()) > 100:
367
+ summary = ". ".join(summary.split(". ")[:3]) + "."
368
+ response += f"**💡 Key Insights**\\n{summary}\\n"
369
+ if knowledge_data['summary_url']:
370
+ response += f"[Read more]({knowledge_data['summary_url']})\\n\\n"
371
+
372
+ # Foundational knowledge
373
+ if knowledge_data['foundational']:
374
+ response += "**📖 Foundational Knowledge**\\n"
375
+ for item in knowledge_data['foundational']:
376
+ title = item.get('title', 'Untitled Resource')
377
+ desc = item.get('description', 'No description available')
378
+ url = item.get('url', '#')
379
+ response += f"- [{title}]({url}): {desc[:150]}...\\n"
380
+ response += "\\n"
381
+
382
+ # Research frontier
383
+ if knowledge_data['research']:
384
+ response += "**🔬 Current Research**\\n"
385
+ for item in knowledge_data['research']:
386
+ title = item.get('title', 'Untitled Paper')
387
+ published = item.get('published', '')
388
+ if published:
389
+ try:
390
+ pub_date = datetime.strptime(published, '%Y-%m-%dT%H:%M:%SZ')
391
+ published = pub_date.strftime('%b %Y')
392
+ except (ValueError, TypeError):
393
+ pass
394
+ url = item.get('url', '#')
395
+ response += f"- [{title}]({url})"
396
+ if published:
397
+ response += f" ({published})"
398
+ response += "\\n"
399
+
400
+ response += "\\n**Which area would you like to explore further?**"
401
+ return response
402
+
403
+ def extract_concept(self, user_input):
404
+ \"\"\"Extract the main concept from user input\"\"\"
405
+ # Simple extraction: look for key phrases
406
+ patterns = [
407
+ r"explain (.*)",
408
+ r"what is (.*)\\?",
409
+ r"tell me about (.*)",
410
+ r"how does (.*) work\\?",
411
+ r"teach me (.*)"
412
+ ]
413
+
414
+ for pattern in patterns:
415
+ match = re.search(pattern, user_input, re.IGNORECASE)
416
+ if match:
417
+ concept = match.group(1).strip()
418
+ # Remove trailing question words
419
+ concept = re.sub(r"\\?$", "", concept)
420
+ return concept
421
+
422
+ # Fallback: return the first 3 words
423
+ return " ".join(user_input.split()[:3])
424
+ """,
425
+
426
+ "cognitive_engine.py": """import random
427
+ import os
428
+ from knowledge_integrator import KnowledgeIntegrator
429
+
430
+ class CognitiveEngine:
431
+ def __init__(self):
432
+ self.knowledge = KnowledgeIntegrator()
433
+ self.learning_context = {"level": "beginner"}
434
+ self.conversation_history = []
435
+
436
+ def identify_improvements(self, task):
437
+ issues = []
438
+ if random.random() > 0.7:
439
+ issues.append("Memory optimization")
440
+ if random.random() > 0.8:
441
+ issues.append("Error handling enhancement")
442
+ if "search" in task:
443
+ issues.append("Information retrieval accuracy")
444
+ return issues
445
+
446
+ def generate_enhancements(self, targets):
447
+ enhancements = []
448
+ for target in targets:
449
+ if "memory" in target.lower():
450
+ enhancements.append("Optimized memory usage with caching")
451
+ elif "error" in target.lower():
452
+ enhancements.append("Added comprehensive error handling")
453
+ elif "retrieval" in target.lower():
454
+ enhancements.append("Improved search relevance algorithms")
455
+ return enhancements
456
+
457
+ def apply_enhancements(self, enhancements):
458
+ print(f"Applying enhancements: {enhancements}")
459
+ with open("log.txt", "a") as log_file:
460
+ log_file.write(f"System enhancements applied: {enhancements}\\n")
461
+ return True
462
+
463
+ def generate_code(self, task, context):
464
+ return f'''# Generated code for: {task}
465
+ import requests
466
+
467
+ def main():
468
+ \"\"\"Autonomously generated function\"\"\"
469
+ print("Hello from AI-generated code!")
470
+ print(f"Task context: {str(context)[:100]}...")
471
+
472
+ # TODO: Implement actual functionality
473
+ return "Task completed successfully"
474
+
475
+ if __name__ == "__main__":
476
+ main()
477
+ '''
478
+
479
+ def improve_code(self, code, review):
480
+ return f'''# Improved code based on review: "{review}"
481
+ {code}
482
 
483
+ # Added error handling based on review
484
+ try:
485
+ # Existing implementation
486
+ pass
487
+ except Exception as e:
488
+ print(f"Error occurred: {{str(e)}}")
489
+ # Additional error recovery logic
490
+ '''
491
+
492
+ def learning_coach_response(self, user_query, session_id=None):
493
+ \"\"\"Generate educational response with integrated knowledge\"\"\"
494
+ # Update conversation history
495
+ self.conversation_history.append({"role": "user", "content": user_query})
496
+
497
+ # Extract core concept
498
+ concept = self.knowledge.extract_concept(user_query)
499
+ if not self.learning_context.get("core_concept"):
500
+ self.learning_context["core_concept"] = concept
501
+
502
+ # Refine context based on conversation
503
+ if "explain" in user_query.lower() or "what is" in user_query.lower():
504
+ self.learning_context["level"] = "beginner"
505
+ elif "research" in user_query.lower() or "latest" in user_query.lower():
506
+ self.learning_context["level"] = "advanced"
507
+
508
+ # Retrieve and integrate knowledge
509
+ knowledge = self.knowledge.integrate_in_teaching(
510
+ self.learning_context["core_concept"],
511
+ age_group=self.learning_context["level"]
512
  )
513
+
514
+ # Format response
515
+ response = self.knowledge.format_for_display(knowledge)
516
+
517
+ # Update conversation history
518
+ self.conversation_history.append({"role": "assistant", "content": response})
519
+
520
+ return response
521
+
522
+ def update_learning_context(self, user_feedback):
523
+ \"\"\"Refine teaching approach based on user interaction\"\"\"
524
+ if "too basic" in user_feedback.lower():
525
+ self.learning_context["level"] = "advanced"
526
+ elif "too advanced" in user_feedback.lower():
527
+ self.learning_context["level"] = "beginner"
528
+ elif "related to" in user_feedback.lower():
529
+ # Extract new concept from feedback
530
+ new_concept = user_feedback.split("related to")[-1].strip()
531
+ self.learning_context["core_concept"] = new_concept
532
+ elif "change topic" in user_feedback.lower():
533
+ # Reset context for new topic
534
+ self.learning_context = {"level": "beginner"}
535
+
536
+ return "Thank you for your feedback! I'll adjust my teaching approach."
537
+ """,
538
+
539
+ "memory.py": """import json
540
+ import os
541
+ from datetime import datetime
542
 
543
+ class MemoryManager:
544
+ def __init__(self):
545
+ self.file_path = "memory.json"
546
+ self.memory = self.load_memory()
547
+
548
+ def load_memory(self):
549
+ if os.path.exists(self.file_path):
550
+ try:
551
+ with open(self.file_path, 'r') as f:
552
+ return json.load(f)
553
+ except json.JSONDecodeError:
554
+ return {}
555
+ return {}
556
+
557
+ def save_memory(self):
558
+ with open(self.file_path, 'w') as f:
559
+ json.dump(self.memory, f, indent=2)
560
+
561
+ def init_session(self, session_id):
562
+ if session_id not in self.memory:
563
+ self.memory[session_id] = {
564
+ "created": datetime.now().isoformat(),
565
+ "entries": []
566
+ }
567
+ self.save_memory()
568
+
569
+ def add(self, session_id, key, value):
570
+ if session_id in self.memory:
571
+ self.memory[session_id]["entries"].append({
572
+ "timestamp": datetime.now().isoformat(),
573
+ "key": key,
574
+ "value": value
575
+ })
576
+ self.save_memory()
577
+
578
+ def get(self, session_id):
579
+ return self.memory.get(session_id, {"entries": []})
580
+
581
+ def search(self, session_id, query):
582
+ results = []
583
+ for entry in self.get(session_id)["entries"]:
584
+ if (query.lower() in entry["key"].lower() or
585
+ (isinstance(entry["value"], str) and query.lower() in entry["value"].lower())):
586
+ results.append(entry)
587
+ return results
588
+
589
+ def get_summary(self, session_id):
590
+ entries = self.get(session_id)["entries"]
591
+ if not entries:
592
+ return "No memories found"
593
+
594
+ # Create a summary of key events
595
+ summary = [f"Session started at {entries[0]['timestamp']}"]
596
+ for entry in entries[-5:]: # Last 5 entries
597
+ summary.append(f"{entry['timestamp']}: {entry['key']}")
598
+ return "\\n".join(summary)
599
+ """,
600
+
601
+ "planner.py": """import random
602
 
603
+ class Planner:
604
+ def plan_task(self, goal, memory):
605
+ plan = [
606
+ f"Research: Search web for information about {goal}",
607
+ f"Learn: Analyze search results to understand {goal}",
608
+ f"Develop: Generate code to accomplish {goal}",
609
+ f"Execute: Run the generated code",
610
+ f"Diagnose: Check system health and performance",
611
+ f"Review: Evaluate results and identify improvements"
612
+ ]
613
+
614
+ # Add additional steps based on goal complexity
615
+ if len(goal) > 50 or "complex" in goal.lower():
616
+ plan.insert(2, "Design: Create architecture for solution")
617
+ plan.insert(4, "Implement: Build core functionality")
618
+
619
+ return plan
620
+ """,
621
+
622
+ "executor.py": """import subprocess
623
+ import os
624
+ import tempfile
625
+
626
+ class Executor:
627
+ def execute_code(self, code):
628
+ try:
629
+ # Save code to temporary file
630
+ with tempfile.NamedTemporaryFile(suffix=".py", delete=False, mode="w") as temp_file:
631
+ temp_file.write(code)
632
+ temp_file_path = temp_file.name
633
+
634
+ # Execute the code
635
+ result = subprocess.run(
636
+ ["python", temp_file_path],
637
+ capture_output=True,
638
+ text=True,
639
+ timeout=30
640
+ )
641
+
642
+ # Clean up
643
+ os.unlink(temp_file_path)
644
+
645
+ if result.returncode == 0:
646
+ return result.stdout.strip() or "Code executed successfully"
647
+ else:
648
+ return f"Error: {result.stderr.strip() or 'Unknown error'}"
649
+
650
+ except subprocess.TimeoutExpired:
651
+ return "Error: Code execution timed out"
652
+ except Exception as e:
653
+ return f"Execution error: {str(e)}"
654
+ """,
655
+
656
+ "critic.py": """import random
657
+
658
+ class Critic:
659
+ def review(self, step, result):
660
+ reviews = {
661
+ "code": [
662
+ "Code executed successfully with no errors.",
663
+ "Code executed but produced unexpected output.",
664
+ "Code contains inefficiencies that could be optimized.",
665
+ "Excellent implementation following best practices."
666
+ ],
667
+ "search": [
668
+ "Relevant information found for the task.",
669
+ "Search results could be more targeted.",
670
+ "Comprehensive research completed successfully."
671
+ ],
672
+ "diagnose": [
673
+ "System health check completed with no issues found.",
674
+ "Minor optimizations identified for system performance.",
675
+ "Critical improvements needed in error handling."
676
+ ]
677
+ }
678
+
679
+ if "code" in step.lower() or "develop" in step.lower():
680
+ return random.choice(reviews["code"])
681
+ elif "research" in step.lower() or "search" in step.lower():
682
+ return random.choice(reviews["search"])
683
+ elif "diagnose" in step.lower() or "check" in step.lower():
684
+ return random.choice(reviews["diagnose"])
685
+ return "Step completed adequately."
686
+ """,
687
+
688
+ "web_searcher.py": """from duckduckgo_search import DDGS
689
+ import json
690
+
691
+ class WebSearcher:
692
+ def search(self, query, max_results=5):
693
+ try:
694
+ with DDGS() as ddgs:
695
+ results = [r for r in ddgs.text(query, max_results=max_results)]
696
+ return json.dumps(results, indent=2)
697
+ except Exception as e:
698
+ return f"Search error: {str(e)}"
699
+ """,
700
+
701
+ "hf_packager.py": """import uuid
702
+ import json
703
+ from datetime import datetime
704
+ import os
705
+
706
+ class HFSpacePackager:
707
+ def create_snapshot(self, data=None):
708
+ snapshot_id = str(uuid.uuid4())
709
+ timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
710
+ filename = f"snapshots/snapshot_{timestamp}_{snapshot_id[:8]}.json"
711
+
712
+ # Save snapshot to file
713
+ os.makedirs("snapshots", exist_ok=True)
714
+ with open(filename, "w") as f:
715
+ json.dump(data, f, indent=2)
716
+
717
+ return f"https://huggingface.co/spaces/Leonydis137/Autonomous-AI/snapshots?file={filename}"
718
+ """,
719
+
720
+ "style.css": """body {
721
+ font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
722
+ background: linear-gradient(135deg, #1a2a6c, #b21f1f, #fdbb2d);
723
+ color: #333;
724
+ margin: 0;
725
+ padding: 20px;
726
+ min-height: 100vh;
727
+ }
728
+
729
+ .container {
730
+ max-width: 1200px;
731
+ margin: 0 auto;
732
+ background: rgba(255, 255, 255, 0.95);
733
+ border-radius: 15px;
734
+ box-shadow: 0 10px 30px rgba(0, 0, 0, 0.2);
735
+ padding: 30px;
736
+ }
737
+
738
+ h1, h2, h3 {
739
+ color: #2c3e50;
740
+ }
741
+
742
+ h1 {
743
+ text-align: center;
744
+ margin-bottom: 30px;
745
+ font-size: 2.5rem;
746
+ text-shadow: 1px 1px 3px rgba(0,0,0,0.1);
747
+ }
748
+
749
+ .tab {
750
+ padding: 20px;
751
+ border-radius: 10px;
752
+ background: rgba(255, 255, 255, 0.85);
753
+ margin-top: 15px;
754
+ }
755
+
756
+ .input-group {
757
+ margin-bottom: 25px;
758
+ }
759
+
760
+ textarea, input[type="text"] {
761
+ width: 100%;
762
+ padding: 15px;
763
+ border: 2px solid #3498db;
764
+ border-radius: 10px;
765
+ font-size: 1.1rem;
766
+ transition: border-color 0.3s;
767
+ background: white;
768
+ }
769
+
770
+ textarea:focus, input[type="text"]:focus {
771
+ border-color: #e74c3c;
772
+ outline: none;
773
+ box-shadow: 0 0 10px rgba(231, 76, 60, 0.3);
774
+ }
775
+
776
+ button {
777
+ background: #3498db;
778
+ color: white;
779
+ border: none;
780
+ padding: 15px 25px;
781
+ font-size: 1.1rem;
782
+ border-radius: 10px;
783
+ cursor: pointer;
784
+ transition: all 0.3s;
785
+ font-weight: bold;
786
+ margin: 10px 5px;
787
+ }
788
+
789
+ button:hover {
790
+ background: #2980b9;
791
+ transform: translateY(-3px);
792
+ box-shadow: 0 5px 15px rgba(0,0,0,0.2);
793
+ }
794
+
795
+ .btn-primary {
796
+ background: #e74c3c;
797
+ }
798
+
799
+ .btn-primary:hover {
800
+ background: #c0392b;
801
+ }
802
+
803
+ .output-section {
804
+ background: #f8f9fa;
805
+ border-radius: 10px;
806
+ padding: 20px;
807
+ margin-top: 20px;
808
+ border-left: 5px solid #3498db;
809
+ }
810
+
811
+ .chat-container {
812
+ background: white;
813
+ border-radius: 10px;
814
+ padding: 15px;
815
+ box-shadow: 0 4px 6px rgba(0,0,0,0.1);
816
+ max-height: 500px;
817
+ overflow-y: auto;
818
+ }
819
+
820
+ .chat-message {
821
+ margin: 10px 0;
822
+ padding: 10px 15px;
823
+ border-radius: 15px;
824
+ max-width: 80%;
825
+ }
826
+
827
+ .user-message {
828
+ background: #e3f2fd;
829
+ margin-left: auto;
830
+ border-bottom-right-radius: 5px;
831
+ }
832
+
833
+ .bot-message {
834
+ background: #f5f5f5;
835
+ margin-right: auto;
836
+ border-bottom-left-radius: 5px;
837
+ }
838
+
839
+ .knowledge-panel {
840
+ background: #fffde7;
841
+ border-radius: 10px;
842
+ padding: 15px;
843
+ margin-top: 15px;
844
+ border: 1px solid #ffecb3;
845
+ }
846
+
847
+ .knowledge-panel h3 {
848
+ color: #ff9800;
849
+ margin-top: 0;
850
+ border-bottom: 2px solid #ffecb3;
851
+ padding-bottom: 8px;
852
+ }
853
+
854
+ .resource-list {
855
+ list-style-type: none;
856
+ padding: 0;
857
+ }
858
+
859
+ .resource-list li {
860
+ margin-bottom: 10px;
861
+ padding: 8px;
862
+ background: white;
863
+ border-radius: 8px;
864
+ border-left: 3px solid #3498db;
865
+ }
866
+
867
+ .resource-link {
868
+ color: #2980b9;
869
+ text-decoration: none;
870
+ font-weight: bold;
871
+ }
872
+
873
+ .resource-link:hover {
874
+ text-decoration: underline;
875
+ }
876
+ """,
877
+
878
+ "requirements.txt": """duckduckgo-search
879
+ gradio
880
+ requests
881
+ python-dotenv
882
+ feedparser
883
+ """,
884
+
885
+ "README.md": """# Autonomous AI System
886
+
887
+ This is a self-improving AI system that can:
888
+ - Search the web for information
889
+ - Learn from new information
890
+ - Generate and execute its own code
891
+ - Diagnose and repair itself
892
+ - Continuously improve its capabilities
893
+
894
+ ## Features
895
+
896
+ 1. **Goal-Oriented Processing**
897
+ Enter any goal and the AI will create a plan to achieve it
898
+
899
+ 2. **Web Search Integration**
900
+ Uses DuckDuckGo to research topics and gather information
901
+
902
+ 3. **Self-Coding Capability**
903
+ Generates Python code to solve problems and executes it safely
904
+
905
+ 4. **Self-Diagnostic System**
906
+ Regularly checks its own performance and identifies improvements
907
+
908
+ 5. **Continuous Learning**
909
+ Maintains memory of all sessions for context-aware operations
910
+
911
+ 6. **Self-Repair Mechanism**
912
+ Automatically fixes identified issues in its own systems
913
+
914
+ 7. **Learning Coach**
915
+ Provides personalized education using vetted knowledge sources
916
+
917
+ 8. **Snapshot System**
918
+ Creates recoverable snapshots of system state
919
+
920
+ ## Setup Instructions
921
 
922
+ 1. Install Python 3.8 or higher
923
 
924
+ 2. Install requirements:
925
+ ```bash
926
+ pip install -r requirements.txt