broadfield-dev committed
Commit 72e53c8 · verified · 1 Parent(s): 4cb86e7

Update app.py

Files changed (1):
  1. app.py (+298 -319)
app.py CHANGED
@@ -1,16 +1,14 @@
 import os
 DEMO_MODE = False
-MEMORY_STORAGE_TYPE = "RAM"

 HF_DATASET_MEMORY_REPO = "broadfield-dev/ai-brain"
 HF_DATASET_RULES_REPO = "broadfield-dev/ai-rules"

-# Set environment variables based on the toggles above BEFORE importing other modules
 os.environ['STORAGE_BACKEND'] = MEMORY_STORAGE_TYPE
 if MEMORY_STORAGE_TYPE == "HF_DATASET":
     os.environ['HF_MEMORY_DATASET_REPO'] = HF_DATASET_MEMORY_REPO
     os.environ['HF_RULES_DATASET_REPO'] = HF_DATASET_RULES_REPO
-# --- END CONFIGURATION ---


 import json
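The removed comment explained an ordering constraint that still holds after this commit: STORAGE_BACKEND must be in os.environ before memory_logic is imported, because that module presumably reads its configuration at import time. A minimal sketch of the consuming side, under that assumption (not the actual memory_logic source):

    # memory_logic (illustrative sketch): config is read once, at import time,
    # so app.py has to populate os.environ before this import executes.
    import os
    STORAGE_BACKEND = os.getenv("STORAGE_BACKEND", "RAM")
    HF_MEMORY_DATASET_REPO = os.getenv("HF_MEMORY_DATASET_REPO", "")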
@@ -22,8 +20,6 @@ import gradio as gr
 import time
 import tempfile
 import xml.etree.ElementTree as ET
-
-# --- New Imports for Image KV Functionality ---
 import io
 import struct
 import numpy as np
@@ -32,16 +28,15 @@ from cryptography.hazmat.primitives.ciphers.aead import AESGCM
 from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
 from cryptography.hazmat.primitives import hashes
 from cryptography.exceptions import InvalidTag
-# --- End New Imports ---
 import random

-load_dotenv() # Load .env file, but our settings above will take precedence if set.

 from model_logic import (
     get_available_providers, get_model_display_names_for_provider,
     get_default_model_display_name_for_provider, call_model_stream, MODELS_BY_PROVIDER
 )
-
 from memory_logic import (
     initialize_memory_system,
     add_memory_entry, retrieve_memories_semantic, get_all_memories_cached, clear_all_memory_data_backend,
@@ -57,8 +52,6 @@ for lib_name in ["urllib3", "requests", "huggingface_hub", "PIL.PngImagePlugin",
     if logging.getLogger(lib_name): logging.getLogger(lib_name).setLevel(logging.WARNING)

 WEB_SEARCH_ENABLED = os.getenv("WEB_SEARCH_ENABLED", "true").lower() == "true"
-TOOL_DECISION_PROVIDER_ENV = os.getenv("TOOL_DECISION_PROVIDER", "groq")
-TOOL_DECISION_MODEL_ID_ENV = os.getenv("TOOL_DECISION_MODEL", "llama3-8b-8192")
 MAX_HISTORY_TURNS = int(os.getenv("MAX_HISTORY_TURNS", 7))
 current_chat_session_history = []

@@ -67,24 +60,21 @@ DEFAULT_SYSTEM_PROMPT = os.getenv(
     "Your Name is Node. You are a Helpful AI Assistant, and your goal is to improve yourself, so that you can improve everybody else's life. Your job is to answer questions and perform research tasks accurately and thoroughly. You can use tools like web search and page browsing. When providing information from the web, cite your sources if possible. If asked to perform a task beyond your capabilities, explain politely. Be concise unless asked for detail."
 )

-# --- Startup Loading File Paths ---
 LOAD_RULES_FILE = os.getenv("LOAD_RULES_FILE")
 LOAD_MEMORIES_FILE = os.getenv("LOAD_MEMORIES_FILE")
-logger.info(f"App Config: WebSearch={WEB_SEARCH_ENABLED}, ToolDecisionProvider={TOOL_DECISION_PROVIDER_ENV}, ToolDecisionModelID={TOOL_DECISION_MODEL_ID_ENV}, MemoryBackend={MEMORY_STORAGE_BACKEND}")
 logger.info(f"Startup loading: Rules from {LOAD_RULES_FILE or 'None'}, Memories from {LOAD_MEMORIES_FILE or 'None'}")


-# --- KV to Image Functions (Constants and Implementation) ---
 KEY_SIZE = 32
 SALT_SIZE = 16
 NONCE_SIZE = 12
 TAG_SIZE = 16
 PBKDF2_ITERATIONS = 480000
-LENGTH_HEADER_SIZE = 4 # struct.pack('>I') uses 4 bytes
 PREFERRED_FONTS = ["Arial", "Helvetica", "DejaVu Sans", "Verdana", "Calibri", "sans-serif"]
 MAX_KEYS_TO_DISPLAY_OVERLAY = 15
 def convert_pil_to_png_bytes(image: Image.Image) -> bytes:
-    """Saves a PIL image to an in-memory buffer as PNG and returns the raw bytes."""
     with io.BytesIO() as buffer:
         image.save(buffer, format="PNG")
         return buffer.getvalue()
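The constants above pin down the AES-GCM envelope used for password-protected KB images: a PBKDF2-HMAC-derived 32-byte key, a 16-byte salt, a 12-byte nonce, and a 4-byte big-endian length header (struct.pack('>I')) framing the payload before embedding. encrypt_data/decrypt_data are called later in this diff but their bodies are not shown; given the imports (AESGCM, PBKDF2HMAC, hashes, InvalidTag), they are presumably close to this sketch:

    # Hedged sketch of the crypto helpers implied by the constants and imports above.
    import os
    from cryptography.hazmat.primitives.ciphers.aead import AESGCM
    from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
    from cryptography.hazmat.primitives import hashes
    from cryptography.exceptions import InvalidTag

    KEY_SIZE, SALT_SIZE, NONCE_SIZE, PBKDF2_ITERATIONS = 32, 16, 12, 480000

    def _derive_key(password: str, salt: bytes) -> bytes:
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=KEY_SIZE, salt=salt, iterations=PBKDF2_ITERATIONS)
        return kdf.derive(password.encode("utf-8"))

    def encrypt_data(data: bytes, password: str) -> bytes:
        salt, nonce = os.urandom(SALT_SIZE), os.urandom(NONCE_SIZE)
        return salt + nonce + AESGCM(_derive_key(password, salt)).encrypt(nonce, data, None)

    def decrypt_data(blob: bytes, password: str) -> bytes:
        salt, nonce, ct = blob[:SALT_SIZE], blob[SALT_SIZE:SALT_SIZE + NONCE_SIZE], blob[SALT_SIZE + NONCE_SIZE:]
        try:
            return AESGCM(_derive_key(password, salt)).decrypt(nonce, ct, None)
        except InvalidTag:
            raise ValueError("Incorrect password or corrupted data.")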
@@ -165,78 +155,57 @@ def parse_kv_string_to_dict(kv_str:str)->dict:
     return dd

 def generate_brain_carrier_image(w=800, h=800) -> Image.Image:
-    """
-    Generates a high-quality carrier image with a multi-layered, procedural starfield
-    using NumPy for performance and visual appeal.
-    """
-    # --- 1. Create the Gradient Background with NumPy ---
     center_x, center_y = w / 2, h / 2
     y_coords, x_coords = np.mgrid[0:h, 0:w]

-    # Calculate distance of each pixel from the center
     distance = np.sqrt((x_coords - center_x)**2 + (y_coords - center_y)**2)
     max_distance = np.sqrt(center_x**2 + center_y**2)
-
-    # Normalize distance to a 0-1 range
     distance_norm = distance / max_distance
-
-    # Define colors and create the gradient array
-    bg_center_color = np.array([20, 25, 40])   # Deeper blue center
-    bg_outer_color = np.array([0, 0, 0])       # Black edges
-
-    # Interpolate colors across all pixels at once (vectorized)
-    # The [..., np.newaxis] part is for broadcasting the color channels
     gradient = bg_outer_color + (bg_center_color - bg_outer_color) * (1 - distance_norm[..., np.newaxis])
-
-    # Convert the NumPy array to a PIL Image to start drawing on it
     img = Image.fromarray(gradient.astype(np.uint8), 'RGB')
     draw = ImageDraw.Draw(img)

-    # --- 2. Draw a Multi-Layered Starfield ---
-
-    # Layer 1: Distant, tiny stars (for depth)
     num_distant_stars = int((w * h) / 200)
     for _ in range(num_distant_stars):
         x, y = random.randint(0, w - 1), random.randint(0, h - 1)
         brightness = random.randint(30, 90)
-        draw.point((x, y), fill=(brightness, brightness, int(brightness * 1.1)))  # Slightly blue tint

-    # Layer 2: Main stars with glow, size, and color variation
     num_main_stars = int((w * h) / 1000)
     star_colors = [
-        (255, 255, 255),   # White
-        (220, 230, 255),   # Light Blue
-        (255, 240, 220),   # Faint Yellow
     ]
-
     for _ in range(num_main_stars):
         x, y = random.randint(0, w - 1), random.randint(0, h - 1)
         dist_from_center = np.sqrt((x - center_x)**2 + (y - center_y)**2)
         dist_ratio = min(dist_from_center / max_distance, 1.0)
-
-        # Base size and brightness increase with distance from center
         size = 0.5 + (2.5 * (dist_ratio ** 2))
         brightness = 120 + (135 * (dist_ratio ** 1.5))
-
-        # Select a random base color
         color = random.choice(star_colors)
-
-        # Apply brightness to the selected color
         final_color = tuple(int(c * (brightness / 255.0)) for c in color)
-
-        # Simulate a soft glow by drawing a larger, dimmer circle first
         glow_size = size * 3
-        glow_color = tuple(int(c * 0.3) for c in final_color)  # Much dimmer
         draw.ellipse([x - glow_size, y - glow_size, x + glow_size, y + glow_size], fill=glow_color)

-        # Simulate a "twinkle" effect for some stars by drawing a cross
-        if random.random() < 0.15:  # 15% chance to twinkle
             draw.line([x-size, y, x+size, y], fill=final_color, width=1)
             draw.line([x, y-size, x, y+size], fill=final_color, width=1)
         else:
-            # Draw the main star on top of the glow
             draw.ellipse([x - size, y - size, x + size, y + size], fill=final_color)
-
     return img
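The gradient line above leans on NumPy broadcasting: distance_norm has shape (h, w), so distance_norm[..., np.newaxis] is (h, w, 1), which broadcasts against the 3-element color vectors to produce an (h, w, 3) array in one vectorized step. A tiny standalone check of the shapes (illustrative, not from app.py):

    import numpy as np
    h, w = 4, 6
    distance_norm = np.linspace(0, 1, h * w).reshape(h, w)
    center, outer = np.array([20, 25, 40]), np.array([0, 0, 0])
    gradient = outer + (center - outer) * (1 - distance_norm[..., np.newaxis])
    print(gradient.shape)  # (4, 6, 3): one RGB triple per pixel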

@@ -260,7 +229,6 @@ def _get_text_measurement(draw_obj, text_str, font_obj):
     except: return len(text_str) * 8, 10

 def draw_key_list_dropdown_overlay(image: Image.Image, keys: list[str] = None, title: str = "Data Embedded") -> Image.Image:
-    """Draws overlays on the image using the 'KeyLock' style."""
     img_overlayed = image.copy().convert("RGBA")
     draw = ImageDraw.Draw(img_overlayed, "RGBA")
     width, height = img_overlayed.size
@@ -288,7 +256,7 @@ def draw_key_list_dropdown_overlay(image: Image.Image, keys: list[str] = None, t

     draw.rectangle([20, box_y0, width - 20, height - 20], fill=overlay_color)
     current_y = box_y0 + box_padding
-
     for i, key_text in enumerate(lines):
         draw.text((text_start_x, current_y), key_text, fill=key_color, font=font_regular)
         if i < len(line_heights):
@@ -296,11 +264,10 @@ def draw_key_list_dropdown_overlay(image: Image.Image, keys: list[str] = None, t

     final_image_rgb = Image.new("RGB", img_overlayed.size, (0, 0, 0))
     final_image_rgb.paste(img_overlayed, (0, 0), img_overlayed)
-
     return final_image_rgb
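The last two context lines flatten the RGBA overlay onto an opaque RGB canvas by passing the RGBA image itself as the paste mask, so its alpha channel controls the blend. A minimal demo of that PIL idiom (not from app.py):

    from PIL import Image
    rgba = Image.new("RGBA", (4, 4), (255, 0, 0, 128))   # half-transparent red
    rgb = Image.new("RGB", rgba.size, (0, 0, 0))         # black background
    rgb.paste(rgba, (0, 0), rgba)                        # alpha-composited paste
    print(rgb.getpixel((0, 0)))                          # roughly (128, 0, 0)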


-# --- Helper Functions ---
 def format_insights_for_prompt(retrieved_insights_list: list[str]) -> tuple[str, list[dict]]:
     if not retrieved_insights_list:
         return "No specific guiding principles or learned insights retrieved.", []
@@ -346,101 +313,210 @@ def generate_interaction_metrics(user_input: str, bot_response: str, provider: s
     logger.error(f"METRICS_GEN Error: {e}", exc_info=False)
     return {"takeaway": "N/A", "response_success_score": 0.5, "future_confidence_score": 0.5, "error": str(e)}

-def process_user_interaction_gradio(user_input: str, provider_name: str, model_display_name: str, chat_history_for_prompt: list[dict], custom_system_prompt: str = None, ui_api_key_override: str = None):
     process_start_time = time.time()
     request_id = os.urandom(4).hex()
-    logger.info(f"PUI_GRADIO [{request_id}] Start. User: '{user_input[:50]}...' Provider: {provider_name}/{model_display_name} Hist_len:{len(chat_history_for_prompt)}")
-    history_str_for_prompt = "\n".join([f"{('User' if t_msg['role'] == 'user' else 'AI')}: {t_msg['content']}" for t_msg in chat_history_for_prompt[-(MAX_HISTORY_TURNS * 2):]])
-    yield "status", "<i>[Checking guidelines (semantic search)...]</i>"
-    initial_insights = retrieve_rules_semantic(f"{user_input}\n{history_str_for_prompt}", k=5)
-    initial_insights_ctx_str, parsed_initial_insights_list = format_insights_for_prompt(initial_insights)
-    logger.info(f"PUI_GRADIO [{request_id}]: Initial RAG (insights) found {len(initial_insights)}. Context: {initial_insights_ctx_str[:150]}...")
-    action_type, action_input_dict = "quick_respond", {}
-    user_input_lower = user_input.lower()
-    time_before_tool_decision = time.time()
-    if WEB_SEARCH_ENABLED and ("http://" in user_input or "https://" in user_input):
-        url_match = re.search(r'(https?://[^\s]+)', user_input)
-        if url_match: action_type, action_input_dict = "scrape_url_and_report", {"url": url_match.group(1)}
-    if action_type == "quick_respond" and len(user_input.split()) <= 3 and any(kw in user_input_lower for kw in ["hello", "hi", "thanks", "ok", "bye"]) and not "?" in user_input: pass
-    elif action_type == "quick_respond" and WEB_SEARCH_ENABLED and (len(user_input.split()) > 3 or "?" in user_input or any(w in user_input_lower for w in ["what is", "how to", "explain", "search for"])):
-        yield "status", "<i>[LLM choosing best approach...]</i>"
-        history_snippet = "\n".join([f"{msg['role']}: {msg['content'][:100]}" for msg in chat_history_for_prompt[-2:]])
-        guideline_snippet = initial_insights_ctx_str[:200].replace('\n', ' ')
-        tool_sys_prompt = "You are a precise routing agent... Output JSON only. Example: {\"action\": \"search_duckduckgo_and_report\", \"action_input\": {\"search_engine_query\": \"query\"}}"
-        tool_user_prompt = f"User Query: \"{user_input}\"\nRecent History:\n{history_snippet}\nGuidelines: {guideline_snippet}...\nAvailable Actions: quick_respond, answer_using_conversation_memory, search_duckduckgo_and_report, scrape_url_and_report.\nSelect one action and input. Output JSON."
-        tool_decision_messages = [{"role":"system", "content": tool_sys_prompt}, {"role":"user", "content": tool_user_prompt}]
-        tool_provider, tool_model_id = TOOL_DECISION_PROVIDER_ENV, TOOL_DECISION_MODEL_ID_ENV
-        tool_model_display = next((dn for dn, mid in MODELS_BY_PROVIDER.get(tool_provider.lower(), {}).get("models", {}).items() if mid == tool_model_id), None)
-        if not tool_model_display: tool_model_display = get_default_model_display_name_for_provider(tool_provider)
-        if tool_model_display:
             try:
-                logger.info(f"PUI_GRADIO [{request_id}]: Tool decision LLM: {tool_provider}/{tool_model_display}")
-                tool_resp_chunks = list(call_model_stream(provider=tool_provider, model_display_name=tool_model_display, messages=tool_decision_messages, temperature=0.0, max_tokens=150))
-                tool_resp_raw = "".join(tool_resp_chunks).strip()
-                json_match_tool = re.search(r"\{.*\}", tool_resp_raw, re.DOTALL)
-                if json_match_tool:
-                    action_data = json.loads(json_match_tool.group(0))
-                    action_type, action_input_dict = action_data.get("action", "quick_respond"), action_data.get("action_input", {})
-                    if not isinstance(action_input_dict, dict): action_input_dict = {}
-                    logger.info(f"PUI_GRADIO [{request_id}]: LLM Tool Decision: Action='{action_type}', Input='{action_input_dict}'")
-                else: logger.warning(f"PUI_GRADIO [{request_id}]: Tool decision LLM non-JSON. Raw: {tool_resp_raw}")
-            except Exception as e: logger.error(f"PUI_GRADIO [{request_id}]: Tool decision LLM error: {e}", exc_info=False)
-        else: logger.error(f"No model for tool decision provider {tool_provider}.")
-    elif action_type == "quick_respond" and not WEB_SEARCH_ENABLED and (len(user_input.split()) > 4 or "?" in user_input or any(w in user_input_lower for w in ["remember","recall"])):
-        action_type="answer_using_conversation_memory"
-    logger.info(f"PUI_GRADIO [{request_id}]: Tool decision logic took {time.time() - time_before_tool_decision:.3f}s. Action: {action_type}, Input: {action_input_dict}")
-    yield "status", f"<i>[Path: {action_type}. Preparing response...]</i>"
-    final_system_prompt_str, final_user_prompt_content_str = custom_system_prompt or DEFAULT_SYSTEM_PROMPT, ""
-    if action_type == "quick_respond":
-        final_system_prompt_str += " Respond directly using guidelines & history."
-        final_user_prompt_content_str = f"History:\n{history_str_for_prompt}\nGuidelines:\n{initial_insights_ctx_str}\nQuery: \"{user_input}\"\nResponse:"
-    elif action_type == "answer_using_conversation_memory":
-        yield "status", "<i>[Searching conversation memory (semantic)...]</i>"
-        retrieved_mems = retrieve_memories_semantic(f"User query: {user_input}\nContext:\n{history_str_for_prompt[-1000:]}", k=2)
-        memory_context = "Relevant Past Interactions:\n" + "\n".join([f"- User:{m.get('user_input','')}->AI:{m.get('bot_response','')} (Takeaway:{m.get('metrics',{}).get('takeaway','N/A')})" for m in retrieved_mems]) if retrieved_mems else "No relevant past interactions found."
-        final_system_prompt_str += " Respond using Memory Context, guidelines, & history."
-        final_user_prompt_content_str = f"History:\n{history_str_for_prompt}\nGuidelines:\n{initial_insights_ctx_str}\nMemory Context:\n{memory_context}\nQuery: \"{user_input}\"\nResponse (use memory context if relevant):"
-    elif WEB_SEARCH_ENABLED and action_type in ["search_duckduckgo_and_report", "scrape_url_and_report"]:
-        query_or_url = action_input_dict.get("search_engine_query") if "search" in action_type else action_input_dict.get("url")
-        if not query_or_url:
-            final_system_prompt_str += " Respond directly (web action failed: no input)."
-            final_user_prompt_content_str = f"History:\n{history_str_for_prompt}\nGuidelines:\n{initial_insights_ctx_str}\nQuery: \"{user_input}\"\nResponse:"
-        else:
-            yield "status", f"<i>[Web: '{query_or_url[:60]}'...]</i>"
-            web_results, max_results = [], 1 if action_type == "scrape_url_and_report" else 2
             try:
-                if action_type == "search_duckduckgo_and_report": web_results = search_and_scrape_duckduckgo(query_or_url, num_results=max_results)
-                elif action_type == "scrape_url_and_report":
-                    res = scrape_url(query_or_url)
-                    if res and (res.get("content") or res.get("error")): web_results = [res]
-            except Exception as e: web_results = [{"url": query_or_url, "title": "Tool Error", "error": str(e)}]
-            scraped_content = "\n".join([f"Source {i+1}:\nURL:{r.get('url','N/A')}\nTitle:{r.get('title','N/A')}\nContent:\n{(r.get('content') or r.get('error') or 'N/A')[:3500]}\n---" for i,r in enumerate(web_results)]) if web_results else f"No results from {action_type} for '{query_or_url}'."
-            yield "status", "<i>[Synthesizing web report...]</i>"
-            final_system_prompt_str += " Generate report/answer from web content, history, & guidelines. Cite URLs as [Source X]."
-            final_user_prompt_content_str = f"History:\n{history_str_for_prompt}\nGuidelines:\n{initial_insights_ctx_str}\nWeb Content:\n{scraped_content}\nQuery: \"{user_input}\"\nReport/Response (cite sources [Source X]):"
-    else:
-        final_system_prompt_str += " Respond directly (unknown action path)."
-        final_user_prompt_content_str = f"History:\n{history_str_for_prompt}\nGuidelines:\n{initial_insights_ctx_str}\nQuery: \"{user_input}\"\nResponse:"
-    final_llm_messages = [{"role": "system", "content": final_system_prompt_str}, {"role": "user", "content": final_user_prompt_content_str}]
-    logger.debug(f"PUI_GRADIO [{request_id}]: Final LLM System Prompt: {final_system_prompt_str[:200]}...")
-    logger.debug(f"PUI_GRADIO [{request_id}]: Final LLM User Prompt Start: {final_user_prompt_content_str[:200]}...")
-    streamed_response, time_before_llm = "", time.time()
     try:
-        for chunk in call_model_stream(provider=provider_name, model_display_name=model_display_name, messages=final_llm_messages, api_key_override=ui_api_key_override, temperature=0.6, max_tokens=2500):
-            if isinstance(chunk, str) and chunk.startswith("Error:"): streamed_response += f"\n{chunk}\n"; yield "response_chunk", f"\n{chunk}\n"; break
-            streamed_response += chunk; yield "response_chunk", chunk
-    except Exception as e: streamed_response += f"\n\n(Error: {str(e)[:150]})"; yield "response_chunk", f"\n\n(Error: {str(e)[:150]})"
-    logger.info(f"PUI_GRADIO [{request_id}]: Main LLM stream took {time.time() - time_before_llm:.3f}s.")
-    final_bot_text = streamed_response.strip() or "(No response or error.)"
     logger.info(f"PUI_GRADIO [{request_id}]: Finished. Total: {time.time() - process_start_time:.2f}s. Resp len: {len(final_bot_text)}")
-    yield "final_response_and_insights", {"response": final_bot_text, "insights_used": parsed_initial_insights_list}

-def perform_post_interaction_learning(user_input: str, bot_response: str, provider: str, model_disp_name: str, insights_reflected: list[dict], api_key_override: str = None):
     task_id = os.urandom(4).hex()
     logger.info(f"POST_INTERACTION_LEARNING [{task_id}]: START User='{user_input[:40]}...', Bot='{bot_response[:40]}...'")
     learning_start_time = time.time()
-    significant_learnings_summary = [] # To store summaries of new core learnings

     try:
         metrics = generate_interaction_metrics(user_input, bot_response, provider, model_disp_name, api_key_override)
@@ -457,13 +533,13 @@ If no operations are warranted, output an empty list: `<operations_list></operat
 ABSOLUTELY NO other text, explanations, or markdown should precede or follow this XML structure.
 Each `<operation>` element must contain the following child elements:
 1. `<action>`: A string, either `"add"` (for entirely new rules) or `"update"` (to replace an existing rule with a better one).
-2. `<insight>`: The full, refined insight text including its `[TYPE|SCORE]` prefix (e.g., `[CORE_RULE|1.0] My name is Lumina, an AI assistant.`). Multi-line insight text can be placed directly within this tag; XML handles newlines naturally.
 3. `<old_insight_to_replace>`: (ONLY for `"update"` action) The *exact, full text* of an existing insight that the new `<insight>` should replace. If action is `"add"`, this element should be omitted or empty.
 **XML Structure Example:**
 <operations_list>
   <operation>
     <action>update</action>
-    <insight>[CORE_RULE|1.0] I am Lumina, an AI assistant.
 My purpose is to help with research.</insight>
     <old_insight_to_replace>[CORE_RULE|0.9] My name is Assistant.</old_insight_to_replace>
   </operation>
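app.py imports xml.etree.ElementTree as ET, so the learning step presumably parses this <operations_list> reply along these lines (a sketch with sample input, not the code from this commit):

    import xml.etree.ElementTree as ET

    xml_text = """<operations_list>
      <operation>
        <action>add</action>
        <insight>[GENERAL_LEARNING|0.8] The user prefers concise answers.</insight>
      </operation>
    </operations_list>"""

    root = ET.fromstring(xml_text)
    for op in root.findall("operation"):
        action = (op.findtext("action") or "").strip().lower()
        insight = (op.findtext("insight") or "").strip()
        old_insight = (op.findtext("old_insight_to_replace") or "").strip()
        print(action, insight, old_insight or "(none)")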
@@ -484,7 +560,6 @@ Provide details only when asked.</insight>
 """
     insight_user_prompt = f"""Interaction Summary:\n{summary}\n
 Potentially Relevant Existing Rules (Review these carefully. Your main goal is to consolidate CORE_RULEs and then identify other changes/additions based on the Interaction Summary and these existing rules):\n{existing_rules_ctx}\n
-Guiding principles that were considered during THIS interaction (these might offer clues for new rules or refinements):\n{json.dumps([p['original'] for p in insights_reflected if 'original' in p]) if insights_reflected else "None"}\n
 Task: Based on your three-step reflection process (Core Identity, New Learnings, Refinements):
 1. **Consolidate CORE_RULEs:** Merge similar identity/purpose rules from "Potentially Relevant Existing Rules" into single, definitive statements using "update" operations. Replace multiple old versions with the new canonical one.
 2. **Add New Learnings:** Identify and "add" any distinct new facts, skills, or important user preferences learned from the "Interaction Summary".
@@ -549,33 +624,24 @@ Combine all findings into a single, valid XML structure as specified in the syst
                     logger.warning(f"POST_INTERACTION_LEARNING [{task_id}]: Op {op_idx}: Skipped op due to invalid insight_text format from XML: '{insight_text[:100]}...'")
                     continue

-                rule_added_or_updated = False
                 if action == "add":
                     success, status_msg = add_rule_entry(insight_text)
                     if success:
                         processed_count +=1
-                        rule_added_or_updated = True
                         if insight_text.upper().startswith("[CORE_RULE"):
                             significant_learnings_summary.append(f"New Core Rule Added: {insight_text}")
                     else: logger.warning(f"POST_INTERACTION_LEARNING [{task_id}]: Op {op_idx} (add from XML): Failed to add rule '{insight_text[:50]}...'. Status: {status_msg}")
                 elif action == "update":
-                    removed_old = False
-                    if old_insight:
-                        if old_insight != insight_text:
-                            remove_success = remove_rule_entry(old_insight)
-                            if not remove_success:
-                                logger.warning(f"POST_INTERACTION_LEARNING [{task_id}]: Op {op_idx} (update from XML): Failed to remove old rule '{old_insight[:50]}...' before adding new.")
-                            else:
-                                removed_old = True
-                        else:
-                            logger.info(f"POST_INTERACTION_LEARNING [{task_id}]: Op {op_idx} (update from XML): Old insight is identical to new insight. Skipping removal.")

                     success, status_msg = add_rule_entry(insight_text)
                     if success:
                         processed_count +=1
-                        rule_added_or_updated = True
                         if insight_text.upper().startswith("[CORE_RULE"):
-                            significant_learnings_summary.append(f"Core Rule Updated (Old: {'Removed' if removed_old else 'Not removed/Same'}, New: {insight_text})")
                     else: logger.warning(f"POST_INTERACTION_LEARNING [{task_id}]: Op {op_idx} (update from XML): Failed to add/update rule '{insight_text[:50]}...'. Status: {status_msg}")
                 else:
                     logger.warning(f"POST_INTERACTION_LEARNING [{task_id}]: Op {op_idx}: Skipped op due to unknown action '{action}' from XML.")
@@ -603,89 +669,108 @@ Combine all findings into a single, valid XML structure as specified in the syst
     logger.info(f"POST_INTERACTION_LEARNING [{task_id}]: END. Total: {time.time() - learning_start_time:.2f}s")


-def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_name: str, sel_model_disp_name: str, ui_api_key: str|None, cust_sys_prompt: str):
     global current_chat_session_history
     cleared_input, updated_gr_hist, status_txt = "", list(gr_hist_list), "Initializing..."
     updated_rules_text = ui_refresh_rules_display_fn()
     updated_mems_json = ui_refresh_memories_display_fn()
-    def_detect_out_md = gr.Markdown(visible=False)
-    def_fmt_out_txt = gr.Textbox(value="*Waiting...*", interactive=True, show_copy_button=True)
-    def_dl_btn = gr.DownloadButton(interactive=False, value=None, visible=False)
-
     if not user_msg_txt.strip():
         status_txt = "Error: Empty message."
         updated_gr_hist.append((user_msg_txt or "(Empty)", status_txt))
-        yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn, updated_rules_text, updated_mems_json)
         return

     updated_gr_hist.append((user_msg_txt, "<i>Thinking...</i>"))
-    yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn, updated_rules_text, updated_mems_json)

-    internal_hist = list(current_chat_session_history); internal_hist.append({"role": "user", "content": user_msg_txt})
-    hist_len_check = MAX_HISTORY_TURNS * 2
-    if internal_hist and internal_hist[0]["role"] == "system": hist_len_check +=1
-    if len(internal_hist) > hist_len_check:
-        current_chat_session_history = ([internal_hist[0]] if internal_hist[0]["role"] == "system" else []) + internal_hist[-(MAX_HISTORY_TURNS * 2):]
-        internal_hist = list(current_chat_session_history)

-    final_bot_resp_acc, insights_used_parsed = "", []
     temp_dl_file_path = None
-
     try:
-        processor_gen = process_user_interaction_gradio(user_input=user_msg_txt, provider_name=sel_prov_name, model_display_name=sel_model_disp_name, chat_history_for_prompt=internal_hist, custom_system_prompt=cust_sys_prompt.strip() or None, ui_api_key_override=ui_api_key.strip() if ui_api_key else None)
         curr_bot_disp_msg = ""
         for upd_type, upd_data in processor_gen:
             if upd_type == "status":
                 status_txt = upd_data
                 if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
-                    updated_gr_hist[-1] = (user_msg_txt, f"{curr_bot_disp_msg} <i>{status_txt}</i>" if curr_bot_disp_msg else f"<i>{status_txt}</i>")
             elif upd_type == "response_chunk":
                 curr_bot_disp_msg += upd_data
                 if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
                     updated_gr_hist[-1] = (user_msg_txt, curr_bot_disp_msg)
-            elif upd_type == "final_response_and_insights":
-                final_bot_resp_acc, insights_used_parsed = upd_data["response"], upd_data["insights_used"]
                 status_txt = "Response generated. Processing learning..."
-                if not curr_bot_disp_msg and final_bot_resp_acc : curr_bot_disp_msg = final_bot_resp_acc
                 if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
                     updated_gr_hist[-1] = (user_msg_txt, curr_bot_disp_msg or "(No text)")
-
-                def_fmt_out_txt = gr.Textbox(value=curr_bot_disp_msg, interactive=True, show_copy_button=True)

                 if curr_bot_disp_msg and not curr_bot_disp_msg.startswith("Error:"):
                     try:
                         with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".md", encoding='utf-8') as tmpfile:
                             tmpfile.write(curr_bot_disp_msg)
                             temp_dl_file_path = tmpfile.name
-                        def_dl_btn = gr.DownloadButton(value=temp_dl_file_path, visible=True, interactive=True)
                     except Exception as e:
                         logger.error(f"Error creating temp file for download: {e}", exc_info=False)
-                        def_dl_btn = gr.DownloadButton(interactive=False, value=None, visible=False, label="Download Error")
                 else:
-                    def_dl_btn = gr.DownloadButton(interactive=False, value=None, visible=False)
-
-                insights_md_content = "### Insights Considered (Pre-Response):\n" + ("\n".join([f"- **[{i.get('type','N/A')}|{i.get('score','N/A')}]** {i.get('text','N/A')[:100]}..." for i in insights_used_parsed[:3]]) if insights_used_parsed else "*None specific.*")
-                def_detect_out_md = gr.Markdown(value=insights_md_content, visible=True if insights_used_parsed else False)

-            yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn, updated_rules_text, updated_mems_json)

-            if upd_type == "final_response_and_insights": break

     except Exception as e:
-        logger.error(f"Chat handler error during main processing: {e}", exc_info=True); status_txt = f"Error: {str(e)[:100]}"
         error_message_for_chat = f"Sorry, an error occurred during response generation: {str(e)[:100]}"
         if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
             updated_gr_hist[-1] = (user_msg_txt, error_message_for_chat)
-        else:
-            updated_gr_hist.append((user_msg_txt, error_message_for_chat))
-        def_fmt_out_txt = gr.Textbox(value=error_message_for_chat, interactive=True)
-        def_dl_btn = gr.DownloadButton(interactive=False, value=None, visible=False)
-        def_detect_out_md = gr.Markdown(value="*Error processing request.*", visible=True)
-
         current_rules_text_on_error = ui_refresh_rules_display_fn()
         current_mems_json_on_error = ui_refresh_memories_display_fn()
-
-        yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn, current_rules_text_on_error, current_mems_json_on_error)
         if temp_dl_file_path and os.path.exists(temp_dl_file_path):
             try: os.unlink(temp_dl_file_path)
             except Exception as e_unlink: logger.error(f"Error deleting temp download file {temp_dl_file_path} after error: {e_unlink}")
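The removed lines above and in the next hunk both enforce the same history window before and after calling the processor: keep an optional leading system message plus the last MAX_HISTORY_TURNS user/assistant pairs (two messages per turn). A standalone restatement of that trim rule, mirroring the removed code:

    MAX_HISTORY_TURNS = 7  # same default as the os.getenv fallback above

    def trim_history(history: list[dict]) -> list[dict]:
        limit = MAX_HISTORY_TURNS * 2
        if history and history[0]["role"] == "system":
            limit += 1
        if len(history) <= limit:
            return history
        head = [history[0]] if history[0]["role"] == "system" else []
        return head + history[-(MAX_HISTORY_TURNS * 2):]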
@@ -693,15 +778,11 @@ def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_na

     if final_bot_resp_acc and not final_bot_resp_acc.startswith("Error:"):
         current_chat_session_history.extend([{"role": "user", "content": user_msg_txt}, {"role": "assistant", "content": final_bot_resp_acc}])
-        hist_len_check = MAX_HISTORY_TURNS * 2
-        if current_chat_session_history and current_chat_session_history[0]["role"] == "system": hist_len_check +=1
-        if len(current_chat_session_history) > hist_len_check:
-            current_chat_session_history = ([current_chat_session_history[0]] if current_chat_session_history[0]["role"] == "system" else []) + current_chat_session_history[-(MAX_HISTORY_TURNS * 2):]
-
         status_txt = "<i>[Performing post-interaction learning...]</i>"
         current_rules_text_before_learn = ui_refresh_rules_display_fn()
         current_mems_json_before_learn = ui_refresh_memories_display_fn()
-        yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn, current_rules_text_before_learn, current_mems_json_before_learn)

         try:
             perform_post_interaction_learning(
@@ -709,30 +790,25 @@ def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_na
                 bot_response=final_bot_resp_acc,
                 provider=sel_prov_name,
                 model_disp_name=sel_model_disp_name,
-                insights_reflected=insights_used_parsed,
                 api_key_override=ui_api_key.strip() if ui_api_key else None
             )
             status_txt = "Response & Learning Complete."
         except Exception as e_learn:
             logger.error(f"Error during post-interaction learning: {e_learn}", exc_info=True)
             status_txt = "Response complete. Error during learning."
-
-    elif final_bot_resp_acc.startswith("Error:"):
-        status_txt = final_bot_resp_acc
     else:
-        status_txt = "Processing finished; no valid response or error occurred during main phase."

     updated_rules_text = ui_refresh_rules_display_fn()
     updated_mems_json = ui_refresh_memories_display_fn()

-    yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn, updated_rules_text, updated_mems_json)

     if temp_dl_file_path and os.path.exists(temp_dl_file_path):
         try: os.unlink(temp_dl_file_path)
         except Exception as e_unlink: logger.error(f"Error deleting temp download file {temp_dl_file_path}: {e_unlink}")


-# --- Startup Loading Functions ---
 def load_rules_from_file(filepath: str | None):
     if not filepath:
         logger.info("LOAD_RULES_FILE environment variable not set. Skipping rules loading from file.")
@@ -882,9 +958,7 @@ def load_memories_from_file(filepath: str | None):
     return added_count, format_error_count, save_error_count


-# --- UI Functions for Rules and Memories (Text and Image) ---
 def convert_kb_to_kv_string(rules: list[str], memories: list[dict], include_rules: bool, include_memories: bool) -> str:
-    """Serializes rules and/or memories into a single key-value string for image embedding based on user selection."""
     lines = ["# iLearn Knowledge Base Export", f"# Exported on: {datetime.utcnow().isoformat()}Z"]

     if include_rules:
@@ -1147,8 +1221,6 @@ def app_load_fn():
             gr.DownloadButton(interactive=False, value=None, visible=False))


-# --- Create a placeholder image for the examples ---
-# This makes the script self-contained and runnable without needing a separate file.
 placeholder_filename = "placeholder_image.png"
 try:
     if not os.path.exists(placeholder_filename):
@@ -1165,76 +1237,7 @@ except Exception as e:
     logger.error(f"Could not create placeholder image. The examples may not load correctly. Error: {e}")


-def ui_download_kb_as_image_fn(password: str, progress=gr.Progress()):
-    """
-    Generates a KB image and returns both the image object for display
-    and a file path for a download button.
-    """
-    progress(0, desc="Fetching knowledge base...")
-    rules, memories = get_all_rules_cached(), get_all_memories_cached()
-    if not rules and not memories:
-        gr.Warning("Knowledge base is empty. Nothing to create.")
-        # Return updates to hide the components if they were previously visible
-        return gr.update(value=None, visible=False), gr.update(value=None, visible=False), "Knowledge base is empty."
-
-    progress(0.2, desc="Serializing data...")
-    kv_string = convert_kb_to_kv_string(rules, memories)
-    data_bytes = kv_string.encode('utf-8')
-
-    if password and password.strip():
-        progress(0.4, desc="Encrypting data...")
-        try:
-            data_bytes = encrypt_data(data_bytes, password.strip())
-            gr.Info("Data encrypted successfully.")
-        except Exception as e:
-            logger.error(f"KB ImgDL: Encrypt failed: {e}")
-            gr.Error(f"Encryption failed: {e}")
-            return gr.update(value=None, visible=False), gr.update(value=None, visible=False), f"Error: {e}"
-
-    progress(0.6, desc="Generating carrier image...")
-    carrier_image = generate_brain_carrier_image(w=800, h=800, msg="iLearn Knowledge Base")
-
-    try:
-        progress(0.7, desc="Embedding data...")
-        embedded_image = embed_data_in_image(carrier_image, data_bytes)
-    except ValueError as e:
-        logger.error(f"KB ImgDL: Embed failed: {e}")
-        gr.Error(f"Data is too large for this image size: {e}")
-        return gr.update(value=None, visible=False), gr.update(value=None, visible=False), f"Error: {e}"
-
-    progress(0.8, desc="Adding visual overlay...")
-    keys_for_overlay = [f"Rule Count: {len(rules)}", f"Memory Count: {len(memories)}", "---"]
-    for r in rules[:5]:
-        match = re.search(r"\](.*)", r, re.DOTALL)
-        rule_content = match.group(1).strip() if match else r
-        keys_for_overlay.append(f"Rule: {rule_content[:40]}...")
-    if len(rules) > 5: keys_for_overlay.append("...")
-
-    title_overlay = "Encrypted Data" if password and password.strip() else "Embedded Data"
-    final_image = draw_key_list_dropdown_overlay(embedded_image, keys=keys_for_overlay, title=title_overlay)
-
-    progress(0.9, desc="Preparing final image and download file...")
-    try:
-        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmpfile:
-            final_image.save(tmpfile, format="PNG")
-            tmp_path = tmpfile.name
-
-        progress(1.0, desc="Image created!")
-        gr.Info("Image created and is ready for download or copy.")
-        # Return updates to show the components with the new data
-        return gr.update(value=final_image, visible=True), gr.update(value=tmp_path, visible=True), "Success! Image created."
-    except Exception as e:
-        logger.error(f"KB ImgDL: Save failed: {e}")
-        gr.Error(f"Failed to save final image: {e}")
-        return gr.update(value=None, visible=False), gr.update(value=None, visible=False), f"Error: {e}"
-
-
 def ui_create_kb_image_fn(password: str, content_to_include: list, progress=gr.Progress()):
-    """
-    Generates a KB image and returns a file path to both the display and download components.
-    """
     include_rules = "Include Rules" in content_to_include
     include_memories = "Include Memories" in content_to_include

@@ -1277,26 +1280,21 @@ def ui_create_kb_image_fn(password: str, content_to_include: list, progress=gr.P
     if include_rules: keys_for_overlay.append(f"Rule Count: {len(rules)}")
     if include_memories: keys_for_overlay.append(f"Memory Count: {len(memories)}")

-    title_overlay = "Encrypted Knowledge Base" if password and password.strip() else "iLearn Knowledge Base"
     final_image = draw_key_list_dropdown_overlay(embedded_image, keys=keys_for_overlay, title=title_overlay)

     progress(0.9, desc="Preparing final image and download file...")
     try:
-        # Create a temporary file and save the image as a PNG.
         with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmpfile:
             final_image.save(tmpfile, format="PNG")
             tmp_path = tmpfile.name
         progress(1.0, desc="Image created!")
-        # Return the FILE PATH to both components.
         return gr.update(value=tmp_path, visible=True), gr.update(value=tmp_path, visible=True), "Success! Image created."
     except Exception as e:
         logger.error(f"KB ImgDL: Save failed: {e}")
         return gr.update(value=None, visible=False), gr.update(value=None, visible=False), f"Error: {e}"

 def ui_load_from_sources_fn(image_filepath: str, rules_file_obj: object, mems_file_obj: object, password: str, progress=gr.Progress()):
-    """
-    Loads data from one of the available sources with precedence: Image > Rules File > Memories File.
-    """
     if image_filepath:
         progress(0.1, desc="Image source detected. Starting image processing...")
         return ui_upload_kb_from_image_fn(image_filepath, password, progress)
@@ -1312,7 +1310,6 @@ def ui_load_from_sources_fn(image_filepath: str, rules_file_obj: object, mems_fi
     return "No file or image uploaded. Please provide a source file to load."


-# --- Gradio UI Definition ---
 with gr.Blocks(theme=gr.themes.Soft(), css=".gr-button { margin: 5px; } .gr-textbox, .gr-text-area, .gr-dropdown, .gr-json { border-radius: 8px; } .gr-group { border: 1px solid #e0e0e0; border-radius: 8px; padding: 10px; } .gr-row { gap: 10px; } .gr-tab { border-radius: 8px; } .status-text { font-size: 0.9em; color: #555; } .gr-json { max-height: 400px; overflow-y: auto; }") as demo:

     gr.Markdown(f"# 🤖 iLearn: An Autonomous Learning Agent {'(DEMO MODE)' if DEMO_MODE else ''}", elem_classes=["header"])
@@ -1332,6 +1329,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=".gr-button { margin: 5px; } .gr-text
                 prov_sel_dd = gr.Dropdown(label="AI Provider", choices=available_providers, value=default_provider, interactive=True)
                 default_model_display = get_default_model_display_name_for_provider(default_provider) if default_provider else None
                 model_sel_dd = gr.Dropdown(label="AI Model", choices=get_model_display_names_for_provider(default_provider) if default_provider else [], value=default_model_display, interactive=True)
             with gr.Group():
                 gr.Markdown("### System Prompt"); sys_prompt_tb = gr.Textbox(label="System Prompt Base", lines=8, value=DEFAULT_SYSTEM_PROMPT, interactive=True)

@@ -1344,13 +1342,12 @@ with gr.Blocks(theme=gr.themes.Soft(), css=".gr-button { margin: 5px; } .gr-text
             with gr.Row(variant="compact"):
                 user_msg_tb = gr.Textbox(show_label=False, placeholder="Ask your research question...", scale=7, lines=1, max_lines=3)
                 send_btn = gr.Button("Send", variant="primary", scale=1, min_width=100)
-            with gr.Accordion("📝 Detailed Response & Insights", open=False):
                 fmt_report_tb = gr.Textbox(label="Full AI Response", lines=8, interactive=True, show_copy_button=True)
                 dl_report_btn = gr.DownloadButton("Download Report", value=None, interactive=False, visible=False)
-                detect_out_md = gr.Markdown(visible=False)

         with gr.TabItem("🧠 Knowledge Base"):
-
             with gr.Tabs():
                 with gr.TabItem("🎛️ System"):
                     gr.Markdown("View and directly manage the current rules and memories in the system.")
@@ -1370,7 +1367,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css=".gr-button { margin: 5px; } .gr-text
                     with gr.Row():
                         rules_stat_tb = gr.Textbox(label="Rules Status", interactive=False, lines=1, elem_classes=["status-text"])
                         mems_stat_tb = gr.Textbox(label="Memories Status", interactive=False, lines=1, elem_classes=["status-text"])
-
                     with gr.Row():
                         with gr.Column():
                             gr.Markdown("### Text File Export")
@@ -1378,15 +1374,12 @@ with gr.Blocks(theme=gr.themes.Soft(), css=".gr-button { margin: 5px; } .gr-text
                             dl_mems_btn = gr.DownloadButton("⬇️ Download Memories (.jsonl)", value=None)
                             gr.Row()
                             if MEMORY_STORAGE_BACKEND == "RAM": save_faiss_sidebar_btn = gr.Button("Save FAISS Indices", variant="secondary")
-
-
                         with gr.Column():
                             gr.Markdown("### Image Export")
                             with gr.Group():
                                 save_kb_password_tb = gr.Textbox(label="Password (optional for encryption)", type="password")
                                 save_kb_include_cbg = gr.CheckboxGroup(label="Content to Include", choices=["Include Rules", "Include Memories"], value=["Include Rules", "Include Memories"])
                                 create_kb_img_btn = gr.Button("✨ Create KB Image", variant="secondary")
-                                # Removed type="pil" to allow Gradio to handle the raw PNG bytes correctly
                                 kb_image_display_output = gr.Image(label="Generated Image (Right-click to copy)", visible=False)
                                 kb_image_download_output = gr.DownloadButton("⬇️ Download Image File", visible=False)

@@ -1394,44 +1387,35 @@ with gr.Blocks(theme=gr.themes.Soft(), css=".gr-button { margin: 5px; } .gr-text
             gr.Markdown("Import rules, memories, or a full KB from local files or a portable PNG image.")
             load_status_tb = gr.Textbox(label="Load Operation Status", interactive=False, lines=2)
             load_kb_password_tb = gr.Textbox(label="Password (for decrypting images)", type="password")
-
             with gr.Group():
                 gr.Markdown("#### Sources (Priority: Image > Rules File > Memories File)")
                 with gr.Row():
                     upload_kb_img_fobj = gr.Image(label="1. Image Source", type="filepath", sources=["upload", "clipboard"], interactive=not DEMO_MODE)
                     upload_rules_fobj = gr.File(label="2. Rules File Source (.txt/.jsonl)", file_types=[".txt", ".jsonl"], interactive=not DEMO_MODE)
                     upload_mems_fobj = gr.File(label="3. Memories File Source (.json/.jsonl)", file_types=[".jsonl", ".json"], interactive=not DEMO_MODE)
-
                 load_master_btn = gr.Button("⬆️ Load from Sources", variant="primary", interactive=not DEMO_MODE)
-            gr.Examples(
-
                 examples=[
-                    ["./evolutions/e0.01.01.png", ""],
-
                 ],
                 inputs=[upload_kb_img_fobj, load_kb_password_tb],
                 label="Click an Example to Load Data"
             )
-    # --- Event Wiring ---
     def dyn_upd_model_dd(sel_prov_dyn: str):
         models_dyn = get_model_display_names_for_provider(sel_prov_dyn); def_model_dyn = get_default_model_display_name_for_provider(sel_prov_dyn)
         return gr.Dropdown(choices=models_dyn, value=def_model_dyn, interactive=True)
     prov_sel_dd.change(fn=dyn_upd_model_dd, inputs=prov_sel_dd, outputs=model_sel_dd)

-
-
-    # Chat Tab
-    chat_ins = [user_msg_tb, main_chat_disp, prov_sel_dd, model_sel_dd, api_key_tb, sys_prompt_tb]
-    chat_outs = [user_msg_tb, main_chat_disp, agent_stat_tb, detect_out_md, fmt_report_tb, dl_report_btn, rules_disp_ta, mems_disp_json]
     chat_event_args = {"fn": handle_gradio_chat_submit, "inputs": chat_ins, "outputs": chat_outs}
     send_btn.click(**chat_event_args); user_msg_tb.submit(**chat_event_args)

-    # KB Tab -> System
     save_edited_rules_btn.click(fn=save_edited_rules_action_fn, inputs=[rules_disp_ta], outputs=[rules_stat_tb], show_progress="full").then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta, show_progress=False)
     clear_rules_btn.click(fn=lambda: ("All rules cleared." if clear_all_rules_data_backend() else "Error clearing rules."), outputs=rules_stat_tb, show_progress=False).then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta, show_progress=False)
     clear_mems_btn.click(fn=lambda: ("All memories cleared." if clear_all_memory_data_backend() else "Error clearing memories."), outputs=mems_stat_tb, show_progress=False).then(fn=ui_refresh_memories_display_fn, outputs=mems_disp_json, show_progress=False)

-    # KB Tab -> Save KB
     dl_rules_btn.click(fn=ui_download_rules_action_fn, inputs=None, outputs=dl_rules_btn, show_progress=False)
     dl_mems_btn.click(fn=ui_download_memories_action_fn, inputs=None, outputs=dl_mems_btn, show_progress=False)
     create_kb_img_btn.click(
@@ -1441,7 +1425,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css=".gr-button { margin: 5px; } .gr-text
         show_progress="full"
     )

-    # KB Tab -> Load KB
     load_master_btn.click(
         fn=ui_load_from_sources_fn,
         inputs=[upload_kb_img_fobj, upload_rules_fobj, upload_mems_fobj, load_kb_password_tb],
@@ -1453,20 +1436,16 @@ with gr.Blocks(theme=gr.themes.Soft(), css=".gr-button { margin: 5px; } .gr-text
         fn=ui_refresh_memories_display_fn, outputs=mems_disp_json
     )

-    # Sidebar FAISS button
     if MEMORY_STORAGE_BACKEND == "RAM" and 'save_faiss_sidebar_btn' in locals():
         def save_faiss_action_with_feedback_sidebar_fn():
             try: save_faiss_indices_to_disk(); gr.Info("Attempted to save FAISS indices to disk.")
             except Exception as e: logger.error(f"Error saving FAISS indices: {e}", exc_info=True); gr.Error(f"Error saving FAISS indices: {e}")
         save_faiss_sidebar_btn.click(fn=save_faiss_action_with_feedback_sidebar_fn, inputs=None, outputs=None, show_progress=False)

-    # App Load
-    app_load_outputs = [agent_stat_tb, rules_disp_ta, mems_disp_json, detect_out_md, fmt_report_tb, dl_report_btn]
     demo.load(fn=app_load_fn, inputs=None, outputs=app_load_outputs, show_progress="full")


-
-
 if __name__ == "__main__":
     logger.info(f"Starting Gradio AI Research Mega Agent (v9.1 - Correct 1-Click JS Download, Memory: {MEMORY_STORAGE_BACKEND})...")
     app_port = int(os.getenv("GRADIO_PORT", 7860))
@@ -1474,5 +1453,5 @@ if __name__ == "__main__":
     app_debug = os.getenv("GRADIO_DEBUG", "False").lower() == "false"
     app_share = os.getenv("GRADIO_SHARE", "False").lower() == "true"
     logger.info(f"Launching Gradio server: http://{app_server}:{app_port}. Debug: {app_debug}, Share: {app_share}")
-    demo.queue().launch(server_name=app_server, server_port=app_port, debug=app_debug, share=app_share, mcp_server=True)
     logger.info("Gradio application shut down.")
 
app.py (updated file, added lines marked +):

 import os
 DEMO_MODE = False
+MEMORY_STORAGE_TYPE = "RAM"

 HF_DATASET_MEMORY_REPO = "broadfield-dev/ai-brain"
 HF_DATASET_RULES_REPO = "broadfield-dev/ai-rules"

 os.environ['STORAGE_BACKEND'] = MEMORY_STORAGE_TYPE
 if MEMORY_STORAGE_TYPE == "HF_DATASET":
     os.environ['HF_MEMORY_DATASET_REPO'] = HF_DATASET_MEMORY_REPO
     os.environ['HF_RULES_DATASET_REPO'] = HF_DATASET_RULES_REPO


 import json

 import time
 import tempfile
 import xml.etree.ElementTree as ET
 import io
 import struct
 import numpy as np

 from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
 from cryptography.hazmat.primitives import hashes
 from cryptography.exceptions import InvalidTag
 import random

+load_dotenv()

 from model_logic import (
     get_available_providers, get_model_display_names_for_provider,
     get_default_model_display_name_for_provider, call_model_stream, MODELS_BY_PROVIDER
 )
+
 from memory_logic import (
     initialize_memory_system,
     add_memory_entry, retrieve_memories_semantic, get_all_memories_cached, clear_all_memory_data_backend,

     if logging.getLogger(lib_name): logging.getLogger(lib_name).setLevel(logging.WARNING)

 WEB_SEARCH_ENABLED = os.getenv("WEB_SEARCH_ENABLED", "true").lower() == "true"
 MAX_HISTORY_TURNS = int(os.getenv("MAX_HISTORY_TURNS", 7))
 current_chat_session_history = []

     "Your Name is Node. You are a Helpful AI Assistant, and your goal is to improve yourself, so that you can improve everybody else's life. Your job is to answer questions and perform research tasks accurately and thoroughly. You can use tools like web search and page browsing. When providing information from the web, cite your sources if possible. If asked to perform a task beyond your capabilities, explain politely. Be concise unless asked for detail."
 )

 LOAD_RULES_FILE = os.getenv("LOAD_RULES_FILE")
 LOAD_MEMORIES_FILE = os.getenv("LOAD_MEMORIES_FILE")
+logger.info(f"App Config: WebSearch={WEB_SEARCH_ENABLED}, MemoryBackend={MEMORY_STORAGE_BACKEND}")
 logger.info(f"Startup loading: Rules from {LOAD_RULES_FILE or 'None'}, Memories from {LOAD_MEMORIES_FILE or 'None'}")


 KEY_SIZE = 32
 SALT_SIZE = 16
 NONCE_SIZE = 12
 TAG_SIZE = 16
 PBKDF2_ITERATIONS = 480000
+LENGTH_HEADER_SIZE = 4
 PREFERRED_FONTS = ["Arial", "Helvetica", "DejaVu Sans", "Verdana", "Calibri", "sans-serif"]
 MAX_KEYS_TO_DISPLAY_OVERLAY = 15
 def convert_pil_to_png_bytes(image: Image.Image) -> bytes:
     with io.BytesIO() as buffer:
         image.save(buffer, format="PNG")
         return buffer.getvalue()

     return dd

 def generate_brain_carrier_image(w=800, h=800) -> Image.Image:
     center_x, center_y = w / 2, h / 2
     y_coords, x_coords = np.mgrid[0:h, 0:w]

     distance = np.sqrt((x_coords - center_x)**2 + (y_coords - center_y)**2)
     max_distance = np.sqrt(center_x**2 + center_y**2)
+
     distance_norm = distance / max_distance
+
+    bg_center_color = np.array([20, 25, 40])
+    bg_outer_color = np.array([0, 0, 0])
+
     gradient = bg_outer_color + (bg_center_color - bg_outer_color) * (1 - distance_norm[..., np.newaxis])
+
     img = Image.fromarray(gradient.astype(np.uint8), 'RGB')
     draw = ImageDraw.Draw(img)

     num_distant_stars = int((w * h) / 200)
     for _ in range(num_distant_stars):
         x, y = random.randint(0, w - 1), random.randint(0, h - 1)
         brightness = random.randint(30, 90)
+        draw.point((x, y), fill=(brightness, brightness, int(brightness * 1.1)))

     num_main_stars = int((w * h) / 1000)
     star_colors = [
+        (255, 255, 255),
+        (220, 230, 255),
+        (255, 240, 220),
     ]
+
     for _ in range(num_main_stars):
         x, y = random.randint(0, w - 1), random.randint(0, h - 1)
         dist_from_center = np.sqrt((x - center_x)**2 + (y - center_y)**2)
         dist_ratio = min(dist_from_center / max_distance, 1.0)
+
         size = 0.5 + (2.5 * (dist_ratio ** 2))
         brightness = 120 + (135 * (dist_ratio ** 1.5))
+
         color = random.choice(star_colors)
+
         final_color = tuple(int(c * (brightness / 255.0)) for c in color)
+
         glow_size = size * 3
+        glow_color = tuple(int(c * 0.3) for c in final_color)
         draw.ellipse([x - glow_size, y - glow_size, x + glow_size, y + glow_size], fill=glow_color)

+        if random.random() < 0.15:
             draw.line([x-size, y, x+size, y], fill=final_color, width=1)
             draw.line([x, y-size, x, y+size], fill=final_color, width=1)
         else:
             draw.ellipse([x - size, y - size, x + size, y + size], fill=final_color)
+
     return img


     except: return len(text_str) * 8, 10

 def draw_key_list_dropdown_overlay(image: Image.Image, keys: list[str] = None, title: str = "Data Embedded") -> Image.Image:
     img_overlayed = image.copy().convert("RGBA")
     draw = ImageDraw.Draw(img_overlayed, "RGBA")
     width, height = img_overlayed.size

     draw.rectangle([20, box_y0, width - 20, height - 20], fill=overlay_color)
     current_y = box_y0 + box_padding
+
     for i, key_text in enumerate(lines):
         draw.text((text_start_x, current_y), key_text, fill=key_color, font=font_regular)
         if i < len(line_heights):

     final_image_rgb = Image.new("RGB", img_overlayed.size, (0, 0, 0))
     final_image_rgb.paste(img_overlayed, (0, 0), img_overlayed)
+
     return final_image_rgb


 def format_insights_for_prompt(retrieved_insights_list: list[str]) -> tuple[str, list[dict]]:
     if not retrieved_insights_list:
         return "No specific guiding principles or learned insights retrieved.", []

     logger.error(f"METRICS_GEN Error: {e}", exc_info=False)
     return {"takeaway": "N/A", "response_success_score": 0.5, "future_confidence_score": 0.5, "error": str(e)}

316
+ def _generate_action_plan(
317
+ original_query: str, provider_name: str, model_display_name: str, ui_api_key_override: str | None, chat_history: list[dict]
318
+ ) -> dict:
+    history_str = "\n".join([f"{msg['role']}: {msg['content'][:150]}" for msg in chat_history[-4:]])
+
+    plan_sys_prompt = """You are a master planner AI. Your goal is to decide the most efficient path to answer a user's query. You have two choices:
+
+1. **fast_response**: If the query is simple, conversational, or can be answered without external tools, choose this.
+2. **multi_step_plan**: If the query requires research, data retrieval, or complex reasoning, create a plan.
+
+Your plan can use the following tools:
+- `web_search`: Use for finding current, public information. The `task` should be a clear research goal (e.g., "Find the population of Tokyo in 2023").
+- `web_scrape`: Use to fetch the contents of one specific page. The `task` should be the URL itself (e.g., "https://example.com").
+- `memory_search`: Use for recalling past interactions or learned facts. The `task` should be a question to ask your memory (e.g., "What did the user previously say their name was?").
+- `think`: A step for internal reflection. Use it to analyze the data gathered so far and decide if the plan needs adjustment or if enough information is present to proceed to the final answer. The `task` should be a question to yourself (e.g., "Is the gathered information sufficient to answer the user's main question?").
+- `respond`: This should ALWAYS be the final step in a multi_step_plan. The `task` is always "Synthesize all information from the scratchpad and provide a comprehensive final answer to the user."
+
+**Output format MUST be a single, valid JSON object.**
+
+**Example for a simple query:**
+{"action_type": "fast_response", "reason": "The user is just saying hello."}
+
+**Example for a complex query:**
+{
+  "action_type": "multi_step_plan",
+  "plan": [
+    {"tool": "memory_search", "task": "What has the user previously expressed interest in regarding AI topics?"},
+    {"tool": "web_search", "task": "Find recent advancements in large language models since early 2023."},
+    {"tool": "web_scrape", "task": "https://example.com"},
+    {"tool": "think", "task": "Based on the user's interests and recent advancements, what are the key points to highlight?"},
+    {"tool": "respond", "task": "Synthesize all information from the scratchpad and provide a comprehensive final answer to the user."}
+  ]
+}
+"""
+    plan_user_prompt = f"Recent Conversation History:\n---\n{history_str}\n---\n\nUser Query: \"{original_query}\"\n\nBased on the query and history, what is the best action plan? Respond with JSON only."
+    plan_messages = [{"role": "system", "content": plan_sys_prompt}, {"role": "user", "content": plan_user_prompt}]

+    try:
+        response_chunks = list(call_model_stream(
+            provider=provider_name,
+            model_display_name=model_display_name,
+            messages=plan_messages,
+            api_key_override=ui_api_key_override,
+            temperature=0.0,
+            max_tokens=1000
+        ))
+        resp_str = "".join(response_chunks).strip()
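+        # Models sometimes wrap their JSON in prose; extract the outermost JSON object before parsing.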
+        json_match = re.search(r"\{.*\}", resp_str, re.DOTALL)
+        if json_match:
+            plan_data = json.loads(json_match.group(0))
+            return plan_data
+    except Exception as e:
+        logger.error(f"PLAN_GEN: Failed to generate or parse action plan: {e}")
+
+    return {
+        "action_type": "multi_step_plan",
+        "plan": [
+            {"tool": "web_search", "task": original_query},
+            {"tool": "respond", "task": "Synthesize all information from the scratchpad and provide a comprehensive final answer to the user."}
+        ]
+    }
+
+def process_user_interaction_gradio(
+    user_input: str,
+    max_research_steps: int,
+    provider_name: str,
+    model_display_name: str,
+    chat_history: list[dict],
+    custom_system_prompt: str = None,
+    ui_api_key_override: str = None,
+):
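+    """Generator that drives one chat turn.
+
+    Yields (update_type, data) tuples consumed by the Gradio handler, e.g. for a
+    multi-step plan:
+        ("status", "<i>[Deciding on an action plan...]</i>")
+        ("plan", [{"tool": "web_search", "task": "..."}, ...])
+        ("step_result", {"step": 1, "tool": "web_search", "task": "...", "result": "..."})
+        ("response_chunk", "...streamed answer text...")
+        ("final_response", {"response": "...full answer text..."})
+    """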
     process_start_time = time.time()
     request_id = os.urandom(4).hex()
+    logger.info(f"PUI_GRADIO [{request_id}] Start. User: '{user_input[:50]}...' Max Steps: {max_research_steps}")
+
+    yield "status", "<i>[Deciding on an action plan...]</i>"
+    action_plan_data = _generate_action_plan(user_input, provider_name, model_display_name, ui_api_key_override, chat_history)
+
+    action_type = action_plan_data.get("action_type")
+
+    if action_type == "fast_response":
+        yield "status", "<i>[Executing fast response...]</i>"
+        yield "plan", [{"tool": "fast_response", "task": action_plan_data.get("reason", "Direct answer.")}]
+
+        now_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+        final_sys_prompt = custom_system_prompt or DEFAULT_SYSTEM_PROMPT
+        final_sys_prompt = f"Current Date/Time: {now_str}.\n\n" + final_sys_prompt
+
+        messages_for_llm = [{"role": "system", "content": final_sys_prompt}] + chat_history + [{"role": "user", "content": user_input}]
+
+        streamed_response = ""
+        try:
+            for chunk in call_model_stream(provider=provider_name, model_display_name=model_display_name, messages=messages_for_llm, api_key_override=ui_api_key_override, temperature=0.7, max_tokens=3000):
+                streamed_response += chunk
+                yield "response_chunk", chunk
+        except Exception as e:
+            streamed_response = f"\n\n(Error during fast response: {str(e)[:150]})"
+            yield "response_chunk", streamed_response
+
+        final_bot_text = streamed_response.strip()
+        yield "final_response", {"response": final_bot_text}
+        return
+
+    plan = action_plan_data.get("plan", [])
+    if not plan:
+        plan = [{"tool": "web_search", "task": user_input}, {"tool": "respond", "task": "Synthesize a response."}]
+
+    yield "plan", plan
+
+    research_scratchpad = ""
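+    # Every executed step appends its findings here; the final synthesis prompt reads the whole scratchpad.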
+    now_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+
+    for i, step_action in enumerate(plan):
+        tool = step_action.get("tool")
+        task = step_action.get("task")
+
+        if tool == 'respond':
+            break
+
+        if i + 1 > max_research_steps:
+            research_scratchpad += f"\n\n---NOTE: Maximum research step budget of {max_research_steps} reached. Proceeding to final response.---\n"
+            logger.warning(f"PUI_GRADIO [{request_id}]: Max research steps ({max_research_steps}) reached.")
+            break
+
+        yield "status", f"<i>[Executing Step {i+1}/{len(plan)-1}: {tool} -> {task[:70]}...]</i>"
+
+        step_findings = f"Step {i+1} ({tool}: '{task}'): "
+
+        if tool == 'web_search':
             try:
+                web_results = search_and_scrape_duckduckgo(task, num_results=2)
+                scraped_content = "\n".join([f"Source:\nURL:{r.get('url','N/A')}\nContent:\n{(r.get('content') or r.get('error') or 'N/A')[:1500]}\n---" for r in web_results]) if web_results else "No results found."
+                synthesis_prompt = f"Relevant web content for the task '{task}':\n\n{scraped_content}\n\nConcisely summarize the findings from the content."
+                summary = "".join(list(call_model_stream(provider=provider_name, model_display_name=model_display_name, messages=[{"role": "user", "content": synthesis_prompt}], api_key_override=ui_api_key_override, temperature=0.1, max_tokens=400)))
+                step_findings += summary
+            except Exception as e:
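+                # DuckDuckGo search failed; fall back to Google before reporting an error for this step.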
+                try:
+                    web_results = search_and_scrape_google(task, num_results=2)
+                    scraped_content = "\n".join([f"Source:\nURL:{r.get('url','N/A')}\nContent:\n{(r.get('content') or r.get('error') or 'N/A')[:1500]}\n---" for r in web_results]) if web_results else "No results found."
+                    synthesis_prompt = f"Relevant web content for the task '{task}':\n\n{scraped_content}\n\nConcisely summarize the findings from the content."
+                    summary = "".join(list(call_model_stream(provider=provider_name, model_display_name=model_display_name, messages=[{"role": "user", "content": synthesis_prompt}], api_key_override=ui_api_key_override, temperature=0.1, max_tokens=400)))
+                    step_findings += summary
+                except Exception as e:
+                    step_findings += f"Error during web search: {e}"
+
+        elif tool == 'web_scrape':
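+            # The planner supplies the target URL itself as this step's task.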
             try:
+                web_results = scrape_url(task)
+                scraped_content = "\n".join([f"Source:\nURL:{r.get('url','N/A')}\nContent:\n{(r.get('content') or r.get('error') or 'N/A')[:1500]}\n---" for r in web_results]) if web_results else "No results found."
+                synthesis_prompt = f"Relevant web content for the task '{task}':\n\n{scraped_content}\n\nConcisely summarize the findings from the content."
+                summary = "".join(list(call_model_stream(provider=provider_name, model_display_name=model_display_name, messages=[{"role": "user", "content": synthesis_prompt}], api_key_override=ui_api_key_override, temperature=0.1, max_tokens=400)))
+                step_findings += summary
+            except Exception as e:
+                step_findings += f"Error during web scrape: {e}"
+
+        elif tool == 'memory_search':
+            try:
+                retrieved_mems = retrieve_memories_semantic(task, k=3)
+                if retrieved_mems:
+                    memory_context = "\n".join([f"- User: {m.get('user_input','')} -> AI: {m.get('bot_response','')} (Takeaway: {m.get('metrics',{}).get('takeaway','N/A')})" for m in retrieved_mems])
+                    step_findings += f"Found relevant memories:\n{memory_context}"
+                else:
+                    step_findings += "No relevant memories found."
+            except Exception as e:
+                step_findings += f"Error during memory search: {e}"
+
+        elif tool == 'think':
+            try:
+                think_prompt = f"Original Query: '{user_input}'\n\nResearch Scratchpad:\n```\n{research_scratchpad}\n```\n\nMy current thinking task is: '{task}'. Based on the scratchpad, what is the conclusion of this thinking step?"
+                thought = "".join(list(call_model_stream(provider=provider_name, model_display_name=model_display_name, messages=[{"role": "user", "content": think_prompt}], api_key_override=ui_api_key_override, temperature=0.3, max_tokens=500)))
+                step_findings += f"Conclusion: {thought}"
+            except Exception as e:
+                step_findings += f"Error during thinking step: {e}"
+        else:
+            step_findings += "Unknown tool specified in plan."
+
+        research_scratchpad += f"\n\n---\n{step_findings}\n---"
+        yield "step_result", {"step": i + 1, "tool": tool, "task": task, "result": step_findings}
+
+    yield "status", "<i>[Synthesizing final report...]</i>"
+
+    final_sys_prompt = custom_system_prompt or DEFAULT_SYSTEM_PROMPT
+    final_sys_prompt += f"\n\nCurrent Date/Time: {now_str}. You have just completed a research plan. Synthesize the information in the 'Research Scratchpad' into a final, comprehensive answer. Cite sources by including URLs if available."
+    final_user_prompt = f"Original user query: \"{user_input}\"\n\nResearch Scratchpad:\n```\n{research_scratchpad}\n```\n\nNow, provide the final, synthesized answer to the user."
+    final_messages = [{"role": "system", "content": final_sys_prompt}, {"role": "user", "content": final_user_prompt}]
+
+    streamed_response = ""
     try:
+        for chunk in call_model_stream(provider=provider_name, model_display_name=model_display_name, messages=final_messages, api_key_override=ui_api_key_override, temperature=0.6, max_tokens=3000):
+            streamed_response += chunk
+            yield "response_chunk", chunk
+    except Exception as e:
+        error_msg = f"\n\n(Error during final synthesis: {str(e)[:150]})"
+        streamed_response += error_msg
+        yield "response_chunk", error_msg
+
+    final_bot_text = streamed_response.strip() or "(No response was generated, or an error occurred during synthesis.)"
     logger.info(f"PUI_GRADIO [{request_id}]: Finished. Total: {time.time() - process_start_time:.2f}s. Resp len: {len(final_bot_text)}")
+    yield "final_response", {"response": final_bot_text}

+def perform_post_interaction_learning(user_input: str, bot_response: str, provider: str, model_disp_name: str, api_key_override: str = None):
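+    """Post-turn learning pass: score the interaction, then ask the model for rule
+    add/update operations (as XML) and apply them to the rules store."""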
     task_id = os.urandom(4).hex()
     logger.info(f"POST_INTERACTION_LEARNING [{task_id}]: START User='{user_input[:40]}...', Bot='{bot_response[:40]}...'")
     learning_start_time = time.time()
+    significant_learnings_summary = []

     try:
         metrics = generate_interaction_metrics(user_input, bot_response, provider, model_disp_name, api_key_override)

 ABSOLUTELY NO other text, explanations, or markdown should precede or follow this XML structure.
 Each `<operation>` element must contain the following child elements:
 1. `<action>`: A string, either `"add"` (for entirely new rules) or `"update"` (to replace an existing rule with a better one).
+2. `<insight>`: The full, refined insight text including its `[TYPE|SCORE]` prefix (e.g., `[CORE_RULE|1.0] My name is [Name], an AI assistant.`). Multi-line insight text can be placed directly within this tag; XML handles newlines naturally.
 3. `<old_insight_to_replace>`: (ONLY for `"update"` action) The *exact, full text* of an existing insight that the new `<insight>` should replace. If action is `"add"`, this element should be omitted or empty.
 **XML Structure Example:**
 <operations_list>
 <operation>
 <action>update</action>
+<insight>[CORE_RULE|1.0] I am [Name], an AI assistant.
 My purpose is to help with research.</insight>
 <old_insight_to_replace>[CORE_RULE|0.9] My name is Assistant.</old_insight_to_replace>
 </operation>

 """
     insight_user_prompt = f"""Interaction Summary:\n{summary}\n
 Potentially Relevant Existing Rules (Review these carefully. Your main goal is to consolidate CORE_RULEs and then identify other changes/additions based on the Interaction Summary and these existing rules):\n{existing_rules_ctx}\n

 Task: Based on your three-step reflection process (Core Identity, New Learnings, Refinements):
 1. **Consolidate CORE_RULEs:** Merge similar identity/purpose rules from "Potentially Relevant Existing Rules" into single, definitive statements using "update" operations. Replace multiple old versions with the new canonical one.
 2. **Add New Learnings:** Identify and "add" any distinct new facts, skills, or important user preferences learned from the "Interaction Summary".

                 logger.warning(f"POST_INTERACTION_LEARNING [{task_id}]: Op {op_idx}: Skipped op due to invalid insight_text format from XML: '{insight_text[:100]}...'")
                 continue

             if action == "add":
                 success, status_msg = add_rule_entry(insight_text)
                 if success:
                     processed_count += 1
                     if insight_text.upper().startswith("[CORE_RULE"):
                         significant_learnings_summary.append(f"New Core Rule Added: {insight_text}")
                 else: logger.warning(f"POST_INTERACTION_LEARNING [{task_id}]: Op {op_idx} (add from XML): Failed to add rule '{insight_text[:50]}...'. Status: {status_msg}")
             elif action == "update":
+                if old_insight and old_insight != insight_text:
+                    remove_success = remove_rule_entry(old_insight)
+                    if not remove_success:
+                        logger.warning(f"POST_INTERACTION_LEARNING [{task_id}]: Op {op_idx} (update from XML): Failed to remove old rule '{old_insight[:50]}...' before adding new.")

                 success, status_msg = add_rule_entry(insight_text)
                 if success:
                     processed_count += 1
                     if insight_text.upper().startswith("[CORE_RULE"):
+                        significant_learnings_summary.append(f"Core Rule Updated to: {insight_text}")
                 else: logger.warning(f"POST_INTERACTION_LEARNING [{task_id}]: Op {op_idx} (update from XML): Failed to add/update rule '{insight_text[:50]}...'. Status: {status_msg}")
             else:
                 logger.warning(f"POST_INTERACTION_LEARNING [{task_id}]: Op {op_idx}: Skipped op due to unknown action '{action}' from XML.")

     logger.info(f"POST_INTERACTION_LEARNING [{task_id}]: END. Total: {time.time() - learning_start_time:.2f}s")

+def handle_gradio_chat_submit(user_msg_txt: str, max_research_steps: int, gr_hist_list: list, sel_prov_name: str, sel_model_disp_name: str, ui_api_key: str|None, cust_sys_prompt: str):
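+    """Gradio event handler for one chat submission.
+
+    Yields 8-tuples in the order of chat_outs: (cleared input, chat history, status text,
+    plan markdown, final report textbox, download button, rules display, memories display).
+    """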
     global current_chat_session_history
     cleared_input, updated_gr_hist, status_txt = "", list(gr_hist_list), "Initializing..."
     updated_rules_text = ui_refresh_rules_display_fn()
     updated_mems_json = ui_refresh_memories_display_fn()
+    plan_md_output = gr.Markdown(visible=False)
+    final_report_tb = gr.Textbox(value="*Waiting...*", interactive=True, show_copy_button=True)
+    dl_report_btn = gr.DownloadButton(interactive=False, value=None, visible=False)
+
     if not user_msg_txt.strip():
         status_txt = "Error: Empty message."
         updated_gr_hist.append((user_msg_txt or "(Empty)", status_txt))
+        yield (cleared_input, updated_gr_hist, status_txt, plan_md_output, final_report_tb, dl_report_btn, updated_rules_text, updated_mems_json)
         return

     updated_gr_hist.append((user_msg_txt, "<i>Thinking...</i>"))
+    yield (cleared_input, updated_gr_hist, status_txt, plan_md_output, final_report_tb, dl_report_btn, updated_rules_text, updated_mems_json)

+    internal_hist = list(current_chat_session_history)

+    final_bot_resp_acc = ""
     temp_dl_file_path = None
+
     try:
+        processor_gen = process_user_interaction_gradio(
+            user_input=user_msg_txt,
+            max_research_steps=max_research_steps,
+            provider_name=sel_prov_name,
+            model_display_name=sel_model_disp_name,
+            chat_history=internal_hist,
+            custom_system_prompt=cust_sys_prompt.strip() or None,
+            ui_api_key_override=ui_api_key.strip() if ui_api_key else None
+        )
+
         curr_bot_disp_msg = ""
+        full_plan = []
+        step_results = {}
+
         for upd_type, upd_data in processor_gen:
             if upd_type == "status":
                 status_txt = upd_data
                 if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
+                    updated_gr_hist[-1] = (user_msg_txt, f"<i>{status_txt}</i>")
+            elif upd_type == "plan":
+                full_plan = upd_data
+                plan_md = "### Action Plan\n" + "\n".join([f"**Step {i+1} ({step.get('tool')})**: {step.get('task')}" for i, step in enumerate(full_plan)])
+                plan_md_output = gr.Markdown(value=plan_md, visible=True)
+            elif upd_type == "step_result":
+                step_num = upd_data["step"]
+                step_results[step_num] = upd_data["result"]
+                results_so_far = "### Research Log\n"
+                for i in range(1, len(full_plan)):
+                    if i in step_results:
+                        results_so_far += f"**Step {i} ({full_plan[i-1].get('tool')})**: ✅ Completed\n"
+                    elif i <= len(step_results) + 1:
+                        results_so_far += f"**Step {i} ({full_plan[i-1].get('tool')})**: ⏳ In progress...\n"
+                    else:
+                        results_so_far += f"**Step {i} ({full_plan[i-1].get('tool')})**: - Pending\n"
+
+                if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
+                    updated_gr_hist[-1] = (user_msg_txt, results_so_far)
             elif upd_type == "response_chunk":
                 curr_bot_disp_msg += upd_data
                 if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
                     updated_gr_hist[-1] = (user_msg_txt, curr_bot_disp_msg)
+            elif upd_type == "final_response":
+                final_bot_resp_acc = upd_data["response"]
                 status_txt = "Response generated. Processing learning..."
+                if not curr_bot_disp_msg and final_bot_resp_acc: curr_bot_disp_msg = final_bot_resp_acc
+
                 if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
                     updated_gr_hist[-1] = (user_msg_txt, curr_bot_disp_msg or "(No text)")
+                final_report_tb = gr.Textbox(value=curr_bot_disp_msg, interactive=True, show_copy_button=True)
 
                 if curr_bot_disp_msg and not curr_bot_disp_msg.startswith("Error:"):
                     try:
                         with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".md", encoding='utf-8') as tmpfile:
                             tmpfile.write(curr_bot_disp_msg)
                             temp_dl_file_path = tmpfile.name
+                        dl_report_btn = gr.DownloadButton(value=temp_dl_file_path, visible=True, interactive=True)
                     except Exception as e:
                         logger.error(f"Error creating temp file for download: {e}", exc_info=False)
+                        dl_report_btn = gr.DownloadButton(interactive=False, value=None, visible=False, label="Download Error")
                 else:
+                    dl_report_btn = gr.DownloadButton(interactive=False, value=None, visible=False)

+            yield (cleared_input, updated_gr_hist, status_txt, plan_md_output, final_report_tb, dl_report_btn, updated_rules_text, updated_mems_json)

+            if upd_type == "final_response": break

     except Exception as e:
+        logger.error(f"Chat handler error during main processing: {e}", exc_info=True)
+        status_txt = f"Error: {str(e)[:100]}"
         error_message_for_chat = f"Sorry, an error occurred during response generation: {str(e)[:100]}"
         if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
             updated_gr_hist[-1] = (user_msg_txt, error_message_for_chat)
+        final_report_tb = gr.Textbox(value=error_message_for_chat, interactive=True)
+        dl_report_btn = gr.DownloadButton(interactive=False, value=None, visible=False)
+        plan_md_output = gr.Markdown(value="*Error processing request.*", visible=True)

         current_rules_text_on_error = ui_refresh_rules_display_fn()
         current_mems_json_on_error = ui_refresh_memories_display_fn()
+        yield (cleared_input, updated_gr_hist, status_txt, plan_md_output, final_report_tb, dl_report_btn, current_rules_text_on_error, current_mems_json_on_error)

         if temp_dl_file_path and os.path.exists(temp_dl_file_path):
             try: os.unlink(temp_dl_file_path)
             except Exception as e_unlink: logger.error(f"Error deleting temp download file {temp_dl_file_path} after error: {e_unlink}")

     if final_bot_resp_acc and not final_bot_resp_acc.startswith("Error:"):
         current_chat_session_history.extend([{"role": "user", "content": user_msg_txt}, {"role": "assistant", "content": final_bot_resp_acc}])
+
         status_txt = "<i>[Performing post-interaction learning...]</i>"
         current_rules_text_before_learn = ui_refresh_rules_display_fn()
         current_mems_json_before_learn = ui_refresh_memories_display_fn()
+        yield (cleared_input, updated_gr_hist, status_txt, plan_md_output, final_report_tb, dl_report_btn, current_rules_text_before_learn, current_mems_json_before_learn)

         try:
             perform_post_interaction_learning(
                 user_input=user_msg_txt,
                 bot_response=final_bot_resp_acc,
                 provider=sel_prov_name,
                 model_disp_name=sel_model_disp_name,
                 api_key_override=ui_api_key.strip() if ui_api_key else None
             )
             status_txt = "Response & Learning Complete."
         except Exception as e_learn:
             logger.error(f"Error during post-interaction learning: {e_learn}", exc_info=True)
             status_txt = "Response complete. Error during learning."
     else:
+        status_txt = "Processing finished, but no valid response was generated (or an error occurred)."

     updated_rules_text = ui_refresh_rules_display_fn()
     updated_mems_json = ui_refresh_memories_display_fn()

+    yield (cleared_input, updated_gr_hist, status_txt, plan_md_output, final_report_tb, dl_report_btn, updated_rules_text, updated_mems_json)

     if temp_dl_file_path and os.path.exists(temp_dl_file_path):
         try: os.unlink(temp_dl_file_path)
         except Exception as e_unlink: logger.error(f"Error deleting temp download file {temp_dl_file_path}: {e_unlink}")

 def load_rules_from_file(filepath: str | None):
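     """Seed the rules store from a local rules file at app startup (used with LOAD_RULES_FILE)."""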
     if not filepath:
         logger.info("LOAD_RULES_FILE environment variable not set. Skipping rules loading from file.")

     return added_count, format_error_count, save_error_count

 def convert_kb_to_kv_string(rules: list[str], memories: list[dict], include_rules: bool, include_memories: bool) -> str:
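     """Serialize the selected rules and/or memories into the plain-text blob embedded into exported KB images."""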
 
     lines = ["# iLearn Knowledge Base Export", f"# Exported on: {datetime.utcnow().isoformat()}Z"]

     if include_rules:

         gr.DownloadButton(interactive=False, value=None, visible=False))

 placeholder_filename = "placeholder_image.png"
 try:
     if not os.path.exists(placeholder_filename):

     logger.error(f"Could not create placeholder image. The examples may not load correctly. Error: {e}")
  def ui_create_kb_image_fn(password: str, content_to_include: list, progress=gr.Progress()):
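     """Build a shareable KB image: serialize the selected content, optionally encrypt it
     with the given password, embed it into a PNG, and overlay a human-readable key list.
     Returns updates for the image preview, the download button, and a status message."""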
     include_rules = "Include Rules" in content_to_include
     include_memories = "Include Memories" in content_to_include

     if include_rules: keys_for_overlay.append(f"Rule Count: {len(rules)}")
     if include_memories: keys_for_overlay.append(f"Memory Count: {len(memories)}")

+    title_overlay = "Encrypted KB" if password and password.strip() else "iLearn KB"
     final_image = draw_key_list_dropdown_overlay(embedded_image, keys=keys_for_overlay, title=title_overlay)

     progress(0.9, desc="Preparing final image and download file...")
     try:
         with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmpfile:
             final_image.save(tmpfile, format="PNG")
             tmp_path = tmpfile.name
         progress(1.0, desc="Image created!")
         return gr.update(value=tmp_path, visible=True), gr.update(value=tmp_path, visible=True), "Success! Image created."
     except Exception as e:
         logger.error(f"KB ImgDL: Save failed: {e}")
         return gr.update(value=None, visible=False), gr.update(value=None, visible=False), f"Error: {e}"

  def ui_load_from_sources_fn(image_filepath: str, rules_file_obj: object, mems_file_obj: object, password: str, progress=gr.Progress()):
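     """Route a load request to the right importer. Source priority: image first,
     then rules file, then memories file."""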
 
 
 
     if image_filepath:
         progress(0.1, desc="Image source detected. Starting image processing...")
         return ui_upload_kb_from_image_fn(image_filepath, password, progress)

     return "No file or image uploaded. Please provide a source file to load."

 with gr.Blocks(theme=gr.themes.Soft(), css=".gr-button { margin: 5px; } .gr-textbox, .gr-text-area, .gr-dropdown, .gr-json { border-radius: 8px; } .gr-group { border: 1px solid #e0e0e0; border-radius: 8px; padding: 10px; } .gr-row { gap: 10px; } .gr-tab { border-radius: 8px; } .status-text { font-size: 0.9em; color: #555; } .gr-json { max-height: 400px; overflow-y: auto; }") as demo:

     gr.Markdown(f"# 🤖 iLearn: An Autonomous Learning Agent {'(DEMO MODE)' if DEMO_MODE else ''}", elem_classes=["header"])

         prov_sel_dd = gr.Dropdown(label="AI Provider", choices=available_providers, value=default_provider, interactive=True)
         default_model_display = get_default_model_display_name_for_provider(default_provider) if default_provider else None
         model_sel_dd = gr.Dropdown(label="AI Model", choices=get_model_display_names_for_provider(default_provider) if default_provider else [], value=default_model_display, interactive=True)
+        research_steps_slider = gr.Slider(label="Max Research Steps", minimum=1, maximum=10, step=1, value=3, interactive=True)
         with gr.Group():
             gr.Markdown("### System Prompt"); sys_prompt_tb = gr.Textbox(label="System Prompt Base", lines=8, value=DEFAULT_SYSTEM_PROMPT, interactive=True)

         with gr.Row(variant="compact"):
             user_msg_tb = gr.Textbox(show_label=False, placeholder="Ask your research question...", scale=7, lines=1, max_lines=3)
             send_btn = gr.Button("Send", variant="primary", scale=1, min_width=100)
+        with gr.Accordion("📝 Detailed Response & Plan", open=False):
+            plan_display_md = gr.Markdown(visible=False)
             fmt_report_tb = gr.Textbox(label="Full AI Response", lines=8, interactive=True, show_copy_button=True)
             dl_report_btn = gr.DownloadButton("Download Report", value=None, interactive=False, visible=False)

         with gr.TabItem("🧠 Knowledge Base"):
             with gr.Tabs():
                 with gr.TabItem("🎛️ System"):
                     gr.Markdown("View and directly manage the current rules and memories in the system.")

                     with gr.Row():
                         rules_stat_tb = gr.Textbox(label="Rules Status", interactive=False, lines=1, elem_classes=["status-text"])
                         mems_stat_tb = gr.Textbox(label="Memories Status", interactive=False, lines=1, elem_classes=["status-text"])

                     with gr.Row():
                         with gr.Column():
                             gr.Markdown("### Text File Export")

                             dl_mems_btn = gr.DownloadButton("⬇️ Download Memories (.jsonl)", value=None)
                             gr.Row()
                             if MEMORY_STORAGE_BACKEND == "RAM": save_faiss_sidebar_btn = gr.Button("Save FAISS Indices", variant="secondary")

                         with gr.Column():
                             gr.Markdown("### Image Export")
                             with gr.Group():
                                 save_kb_password_tb = gr.Textbox(label="Password (optional for encryption)", type="password")
                                 save_kb_include_cbg = gr.CheckboxGroup(label="Content to Include", choices=["Include Rules", "Include Memories"], value=["Include Rules", "Include Memories"])
                                 create_kb_img_btn = gr.Button("✨ Create KB Image", variant="secondary")
                                 kb_image_display_output = gr.Image(label="Generated Image (Right-click to copy)", visible=False)
                                 kb_image_download_output = gr.DownloadButton("⬇️ Download Image File", visible=False)

                     gr.Markdown("Import rules, memories, or a full KB from local files or a portable PNG image.")
                     load_status_tb = gr.Textbox(label="Load Operation Status", interactive=False, lines=2)
                     load_kb_password_tb = gr.Textbox(label="Password (for decrypting images)", type="password")
                     with gr.Group():
                         gr.Markdown("#### Sources (Priority: Image > Rules File > Memories File)")
                         with gr.Row():
                             upload_kb_img_fobj = gr.Image(label="1. Image Source", type="filepath", sources=["upload", "clipboard"], interactive=not DEMO_MODE)
                             upload_rules_fobj = gr.File(label="2. Rules File Source (.txt/.jsonl)", file_types=[".txt", ".jsonl"], interactive=not DEMO_MODE)
                             upload_mems_fobj = gr.File(label="3. Memories File Source (.json/.jsonl)", file_types=[".jsonl", ".json"], interactive=not DEMO_MODE)
                         load_master_btn = gr.Button("⬆️ Load from Sources", variant="primary", interactive=not DEMO_MODE)
+                    gr.Examples(
                         examples=[
+                            [placeholder_filename, ""],
                         ],
                         inputs=[upload_kb_img_fobj, load_kb_password_tb],
                         label="Click an Example to Load Data"
                     )
+
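     # Keep the model dropdown in sync with the selected provider.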
     def dyn_upd_model_dd(sel_prov_dyn: str):
         models_dyn = get_model_display_names_for_provider(sel_prov_dyn); def_model_dyn = get_default_model_display_name_for_provider(sel_prov_dyn)
         return gr.Dropdown(choices=models_dyn, value=def_model_dyn, interactive=True)
     prov_sel_dd.change(fn=dyn_upd_model_dd, inputs=prov_sel_dd, outputs=model_sel_dd)
 
+    chat_ins = [user_msg_tb, research_steps_slider, main_chat_disp, prov_sel_dd, model_sel_dd, api_key_tb, sys_prompt_tb]
+    chat_outs = [user_msg_tb, main_chat_disp, agent_stat_tb, plan_display_md, fmt_report_tb, dl_report_btn, rules_disp_ta, mems_disp_json]
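+    # NOTE: chat_outs order must match the tuples yielded by handle_gradio_chat_submit.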
 
 
 
     chat_event_args = {"fn": handle_gradio_chat_submit, "inputs": chat_ins, "outputs": chat_outs}
     send_btn.click(**chat_event_args); user_msg_tb.submit(**chat_event_args)

     save_edited_rules_btn.click(fn=save_edited_rules_action_fn, inputs=[rules_disp_ta], outputs=[rules_stat_tb], show_progress="full").then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta, show_progress=False)
     clear_rules_btn.click(fn=lambda: ("All rules cleared." if clear_all_rules_data_backend() else "Error clearing rules."), outputs=rules_stat_tb, show_progress=False).then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta, show_progress=False)
     clear_mems_btn.click(fn=lambda: ("All memories cleared." if clear_all_memory_data_backend() else "Error clearing memories."), outputs=mems_stat_tb, show_progress=False).then(fn=ui_refresh_memories_display_fn, outputs=mems_disp_json, show_progress=False)

     dl_rules_btn.click(fn=ui_download_rules_action_fn, inputs=None, outputs=dl_rules_btn, show_progress=False)
     dl_mems_btn.click(fn=ui_download_memories_action_fn, inputs=None, outputs=dl_mems_btn, show_progress=False)
     create_kb_img_btn.click(

         show_progress="full"
     )

     load_master_btn.click(
         fn=ui_load_from_sources_fn,
         inputs=[upload_kb_img_fobj, upload_rules_fobj, upload_mems_fobj, load_kb_password_tb],

         fn=ui_refresh_memories_display_fn, outputs=mems_disp_json
     )

     if MEMORY_STORAGE_BACKEND == "RAM" and 'save_faiss_sidebar_btn' in locals():
         def save_faiss_action_with_feedback_sidebar_fn():
             try: save_faiss_indices_to_disk(); gr.Info("Attempted to save FAISS indices to disk.")
             except Exception as e: logger.error(f"Error saving FAISS indices: {e}", exc_info=True); gr.Error(f"Error saving FAISS indices: {e}")
         save_faiss_sidebar_btn.click(fn=save_faiss_action_with_feedback_sidebar_fn, inputs=None, outputs=None, show_progress=False)

+    app_load_outputs = [agent_stat_tb, rules_disp_ta, mems_disp_json, plan_display_md, fmt_report_tb, dl_report_btn]
     demo.load(fn=app_load_fn, inputs=None, outputs=app_load_outputs, show_progress="full")

 if __name__ == "__main__":
     logger.info(f"Starting Gradio AI Research Mega Agent (v9.1 - Correct 1-Click JS Download, Memory: {MEMORY_STORAGE_BACKEND})...")
     app_port = int(os.getenv("GRADIO_PORT", 7860))

     app_debug = os.getenv("GRADIO_DEBUG", "False").lower() == "true"
     app_share = os.getenv("GRADIO_SHARE", "False").lower() == "true"
     logger.info(f"Launching Gradio server: http://{app_server}:{app_port}. Debug: {app_debug}, Share: {app_share}")
+    demo.queue().launch(server_name=app_server, server_port=app_port, debug=app_debug, share=app_share)
     logger.info("Gradio application shut down.")