Update app.py
app.py CHANGED

@@ -1,225 +1,312 @@
-import io
 import json
 import logging

 logger = logging.getLogger(__name__)

-# ==============================================================================
-# UI HELPER & REMOTE API CALL LOGIC
-# ==============================================================================
-
-def get_server_list():
-    status = "Fetching server list from remote config..."
-    yield gr.Dropdown(choices=[], value=None, label="⏳ Fetching..."), status, []
-    try:
-        response = requests.get(CREATOR_ENDPOINTS_JSON_URL, timeout=10)
-        response.raise_for_status()
-        all_entries = response.json()
-        valid_endpoints = []
-        required_keys = ["name", "api_endpoint", "public_key"]
-        for entry in all_entries:
-            if all(key in entry for key in required_keys):
-                valid_endpoints.append(entry)
-            else:
-                logger.warning(f"Skipping invalid entry in configuration file: {entry}")
-        if not valid_endpoints:
-            raise ValueError("No valid server configurations found in the remote file.")
-        endpoint_names = [e['name'] for e in valid_endpoints]
-        status = f"✅ Success! Found {len(endpoint_names)} valid servers."
-        yield gr.Dropdown(choices=endpoint_names, value=endpoint_names[0] if endpoint_names else None, label="Target Server"), status, valid_endpoints
-    except Exception as e:
-        status = f"❌ Error fetching or parsing configuration: {e}"
-        yield gr.Dropdown(choices=[], value=None, label="Error fetching servers"), status, []

-def create_keylock_wrapper(service_name: str, secret_data: str, available_endpoints: list):
-    if not service_name: raise gr.Error("Please select a target server.")
-    public_key = next((e['public_key'] for e in available_endpoints if e['name'] == service_name), None)
-    if not public_key: raise gr.Error(f"Could not find public key for '{service_name}'.")
     try:
     except Exception as e:
-        response_json = response.json()
-        if response.status_code == 200:
-            if "data" in response_json:
-                yield response_json["data"][0], "✅ Success! Data decrypted by remote server."
-            else:
-                raise gr.Error(f"API returned an unexpected success format: {response_json}")
         else:
-    except Exception as e:
-        yield None, f"❌ Error calling server API: {e}"
-
-def refresh_and_update_all():
-    for dropdown_update, status_update, state_update in get_server_list():
-        pass
-    return dropdown_update, dropdown_update, status_update, state_update
-
-# ==============================================================================
-# GRADIO DASHBOARD INTERFACE
-# ==============================================================================
-theme = gr.themes.Base(
-    primary_hue=gr.themes.colors.blue, secondary_hue=gr.themes.colors.sky, neutral_hue=gr.themes.colors.slate,
-    font=(gr.themes.GoogleFont("Inter"), "system-ui", "sans-serif"),
-).set(
-    body_background_fill="#F1F5F9", panel_background_fill="white", block_background_fill="white",
-    block_border_width="1px", block_shadow="*shadow_drop_lg",
-    button_primary_background_fill="*primary_600", button_primary_background_fill_hover="*primary_700",
-)

-    endpoints_state = gr.State([])
-
-    gr.Markdown("# 🔐 KeyLock Operations Dashboard")
-    gr.Markdown("A centralized dashboard to manage and demonstrate the entire KeyLock ecosystem. Key/Image creation is performed locally, while decryption is handled by a **live, remote API call** to a secure server.")
-
-    with gr.Tabs() as tabs:
-        with gr.TabItem("➕ Create KeyLock", id=0):
-            gr.Markdown("## Step 1: Create an Encrypted Authentication Image (Local)")
-            gr.Markdown(f"This tool acts as the **Auth Creator**. It fetches a list of available servers from a public [configuration file]({CREATOR_ENDPOINTS_JSON_URL}), then uses the selected server's public key to encrypt your data into a PNG. **This entire creation process happens locally.**")
-            with gr.Row(variant="panel"):
-                with gr.Column(scale=2):
-                    with gr.Row():
-                        creator_service_dropdown = gr.Dropdown(label="Target Server", interactive=True, info="Select the API server to encrypt data for.")
-                        refresh_button = gr.Button("🔄", scale=0, size="sm")
-                    creator_secret_input = gr.Textbox(lines=8, label="Secret Data to Encrypt", placeholder="API_KEY: sk-123...\nUSER: demo-user")
-                    creator_button = gr.Button("✨ Create Auth Image", variant="primary")
-                with gr.Column(scale=1):
-                    creator_status = gr.Textbox(label="Status", interactive=False, lines=2)
-                    creator_image_output = gr.Image(label="Generated Encrypted Image", type="pil", show_download_button=True, format="png")
-
-        with gr.TabItem("➡ Send KeyLock", id=1):
-            gr.Markdown("## Step 2: Decrypt via Live API Call")
-            gr.Markdown("This tool acts as the **Client**. It sends the encrypted image you created in Step 1 to the live, remote **Decoder Server** you select from the same configuration list. The server uses its securely stored private key to decrypt the data and sends the result back.")
-            with gr.Row(variant="panel"):
-                with gr.Column(scale=1):
-                    gr.Markdown("### Configuration")
-                    send_service_dropdown = gr.Dropdown(label="Target Server", interactive=True, info="Select the API server to send the image to.")
-                    gr.Markdown("### Image to Send")
-                    client_image_input = gr.Image(type="pil", label="Upload or Drag Encrypted Image Here", sources=["upload", "clipboard"])
-                    client_button = gr.Button("🔓 Decrypt via Remote Server", variant="primary")
-                with gr.Column(scale=1):
-                    gr.Markdown("### Response from Server")
-                    client_status = gr.Textbox(label="Status", interactive=False, lines=2)
-                    client_json_output = gr.JSON(label="Decrypted Data")
+# app.py
+
+import os
 import json
 import logging
+import tempfile
+from dotenv import load_dotenv
+import gradio as gr
+
+load_dotenv()
+
+# --- Keep all your existing constants and setup ---
+MEMORY_STORAGE_TYPE = "HF_DATASET"
+HF_DATASET_MEMORY_REPO = "broadfield-dev/ai-brain"
+HF_DATASET_RULES_REPO = "broadfield-dev/ai-rules"
+
+os.environ['STORAGE_BACKEND'] = MEMORY_STORAGE_TYPE
+if MEMORY_STORAGE_TYPE == "HF_DATASET":
+    os.environ['HF_MEMORY_DATASET_REPO'] = HF_DATASET_MEMORY_REPO
+    os.environ['HF_RULES_DATASET_REPO'] = HF_DATASET_RULES_REPO
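+# Note: these variables are set before the memory_logic import below, presumably
+# because that module reads STORAGE_BACKEND and the repo names at import time.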
+
+from model_logic import get_available_providers, get_model_display_names_for_provider, get_default_model_display_name_for_provider
+from memory_logic import (
+    initialize_memory_system, add_memory_entry, get_all_memories_cached, clear_all_memory_data_backend,
+    add_rule_entry, remove_rule_entry, get_all_rules_cached, clear_all_rules_data_backend,
+    save_faiss_indices_to_disk, STORAGE_BACKEND as MEMORY_STORAGE_BACKEND, SQLITE_DB_PATH as MEMORY_SQLITE_PATH,
+    HF_MEMORY_DATASET_REPO as MEMORY_HF_MEM_REPO, HF_RULES_DATASET_REPO as MEMORY_HF_RULES_REPO
+)
+from tools.orchestrator import orchestrate_and_respond
+from learning import perform_post_interaction_learning
+from utils import load_rules_from_file, load_memories_from_file
+from prompts import DEFAULT_SYSTEM_PROMPT
+
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(threadName)s - %(message)s')
 logger = logging.getLogger(__name__)
+for lib_name in ["urllib3", "requests", "huggingface_hub", "PIL.PngImagePlugin", "matplotlib", "gradio_client.client", "multipart.multipart", "httpx", "sentence_transformers", "faiss", "datasets"]:
+    logging.getLogger(lib_name).setLevel(logging.WARNING)

+MAX_HISTORY_TURNS = int(os.getenv("MAX_HISTORY_TURNS", 7))
+LOAD_RULES_FILE = os.getenv("LOAD_RULES_FILE")
+LOAD_MEMORIES_FILE = os.getenv("LOAD_MEMORIES_FILE")
+current_chat_session_history = []
+
+def handle_gradio_chat_submit(
+    user_msg_txt: str,
+    gr_hist_list: list,
+    sel_prov_name: str,
+    sel_model_disp_name: str,
+    ui_api_key: str | None,
+    cust_sys_prompt: str,
+    # New stateful inputs from the UI
+    turn_budget: int,
+    orchestrator_state: dict
+):
+    global current_chat_session_history
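+    # Each yield below must match the order of `chat_outs` wired up at the bottom
+    # of this file: (input box, chat history, status, insights markdown, full
+    # response text, download button, rules text, memories JSON, orchestrator state).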
+    cleared_input, updated_gr_hist, status_txt = "", list(gr_hist_list), "Initializing..."
+    updated_rules_text = ui_refresh_rules_display_fn()
+    updated_mems_json = ui_refresh_memories_display_fn()
+    def_detect_out_md = gr.Markdown(visible=False)
+    def_fmt_out_txt = gr.Textbox(value="*Waiting...*", interactive=True, show_copy_button=True)
+    def_dl_btn = gr.DownloadButton(interactive=False, value=None, visible=False)
+
+    # Make a mutable copy of the state for this run
+    latest_orchestrator_state = dict(orchestrator_state)
+
+    # Check for invalid input: an empty message when no task is running
+    is_ongoing_task = latest_orchestrator_state.get('is_complex_task', False)
+    if not user_msg_txt.strip() and not is_ongoing_task:
+        status_txt = "Error: Please enter a message to start."
+        # Don't add to history, just show the status
+        yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn, updated_rules_text, updated_mems_json, latest_orchestrator_state)
+        return
+
+    # If the user sends an empty message during a task, it means "continue"
+    effective_user_msg = user_msg_txt or "(Continuing previous task...)"
+    updated_gr_hist.append((effective_user_msg, "<i>Thinking...</i>"))
+    # Yield an initial update so the user's message appears in the chat
+    yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn, updated_rules_text, updated_mems_json, latest_orchestrator_state)
+
+    internal_hist = list(current_chat_session_history)
+    internal_hist.append({"role": "user", "content": user_msg_txt})
+    if len(internal_hist) > MAX_HISTORY_TURNS * 2:
+        internal_hist = internal_hist[-(MAX_HISTORY_TURNS * 2):]
+
+    final_bot_resp_acc, insights_used_parsed = "", []
+    temp_dl_file_path = None

     try:
+        processor_gen = orchestrate_and_respond(
+            user_input=user_msg_txt,
+            provider_name=sel_prov_name,
+            model_display_name=sel_model_disp_name,
+            chat_history_for_prompt=internal_hist,
+            custom_system_prompt=cust_sys_prompt.strip() or None,
+            ui_api_key_override=ui_api_key.strip() if ui_api_key else None,
+            # Pass the new stateful parameters
+            turn_budget=int(turn_budget),
+            orchestrator_state=orchestrator_state
+        )
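+        # The orchestrator appears to yield (update_type, payload) pairs; the loop
+        # below handles "state", "status", "response_chunk", and
+        # "final_response_and_insights", e.g. ("response_chunk", "partial text...")
+        # or ("final_response_and_insights", {"response": ..., "insights_used": [...]}).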
curr_bot_disp_msg = ""
|
| 100 |
+
for upd_type, upd_data in processor_gen:
|
| 101 |
+
if upd_type == "state":
|
| 102 |
+
latest_orchestrator_state = upd_data
|
| 103 |
+
continue # This is a background update, no need to yield to UI yet
|
+
+            if upd_type == "status":
+                status_txt = upd_data
+                if updated_gr_hist[-1][0] == effective_user_msg:
+                    updated_gr_hist[-1] = (effective_user_msg, f"{curr_bot_disp_msg} <i>{status_txt}</i>" if curr_bot_disp_msg else f"<i>{status_txt}</i>")
+            elif upd_type == "response_chunk":
+                curr_bot_disp_msg += upd_data
+                if updated_gr_hist[-1][0] == effective_user_msg:
+                    updated_gr_hist[-1] = (effective_user_msg, curr_bot_disp_msg)
+            elif upd_type == "final_response_and_insights":
+                final_bot_resp_acc, insights_used_parsed = upd_data["response"], upd_data["insights_used"]
+                status_txt = "Response generated. Processing learning..."
+                if not curr_bot_disp_msg and final_bot_resp_acc:
+                    curr_bot_disp_msg = final_bot_resp_acc
+                if updated_gr_hist[-1][0] == effective_user_msg:
+                    updated_gr_hist[-1] = (effective_user_msg, curr_bot_disp_msg or "(No text)")
+                def_fmt_out_txt = gr.Textbox(value=curr_bot_disp_msg, interactive=True, show_copy_button=True)
+                if curr_bot_disp_msg and not curr_bot_disp_msg.startswith("Error:"):
+                    with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".md", encoding='utf-8') as tmpfile:
+                        tmpfile.write(curr_bot_disp_msg)
+                        temp_dl_file_path = tmpfile.name
+                    def_dl_btn = gr.DownloadButton(value=temp_dl_file_path, visible=True, interactive=True)
+                insights_md_content = "### Insights Considered:\n" + ("\n".join([f"- **[{i.get('type','N/A')}|{i.get('score','N/A')}]** {i.get('text','N/A')[:100]}..." for i in insights_used_parsed[:3]]) if insights_used_parsed else "*None specific.*")
+                def_detect_out_md = gr.Markdown(value=insights_md_content, visible=bool(insights_used_parsed))
+
+            yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn, updated_rules_text, updated_mems_json, latest_orchestrator_state)
+            if upd_type == "final_response_and_insights":
+                break
     except Exception as e:
+        logger.error(f"Chat handler error: {e}", exc_info=True)
+        status_txt = f"Error: {str(e)[:100]}"
+        error_message_for_chat = f"Sorry, an error occurred: {str(e)[:100]}"
+        if updated_gr_hist[-1][0] == effective_user_msg:
+            updated_gr_hist[-1] = (effective_user_msg, error_message_for_chat)
+        else:
+            updated_gr_hist.append((effective_user_msg, error_message_for_chat))
+        latest_orchestrator_state = {}  # Reset state on error
+        yield (cleared_input, updated_gr_hist, status_txt, gr.Markdown(value="*Error processing request.*", visible=True), gr.Textbox(value=error_message_for_chat, interactive=True), def_dl_btn, ui_refresh_rules_display_fn(), ui_refresh_memories_display_fn(), latest_orchestrator_state)
+        if temp_dl_file_path and os.path.exists(temp_dl_file_path):
+            os.unlink(temp_dl_file_path)
+        return
+
+    # Don't add partial/continue messages to long-term history
+    if final_bot_resp_acc and not final_bot_resp_acc.startswith("Error:"):
+        # Add the initial user prompt and the final bot response to history
+        if orchestrator_state.get('is_complex_task'):
+            # For a complex task, the history entry should be the original goal
+            user_prompt_for_history = orchestrator_state.get('original_goal', user_msg_txt)
+            current_chat_session_history.extend([{"role": "user", "content": user_prompt_for_history}, {"role": "assistant", "content": final_bot_resp_acc}])
         else:
+            current_chat_session_history.extend([{"role": "user", "content": user_msg_txt}, {"role": "assistant", "content": final_bot_resp_acc}])
+
+        if len(current_chat_session_history) > MAX_HISTORY_TURNS * 2:
+            current_chat_session_history = current_chat_session_history[-(MAX_HISTORY_TURNS * 2):]
+
+        status_txt = "<i>[Performing post-interaction learning...]</i>"
+        yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn, ui_refresh_rules_display_fn(), ui_refresh_memories_display_fn(), latest_orchestrator_state)
+        try:
+            perform_post_interaction_learning(user_input=user_msg_txt, bot_response=final_bot_resp_acc, provider=sel_prov_name, model_disp_name=sel_model_disp_name, insights_reflected=insights_used_parsed, api_key_override=ui_api_key.strip() if ui_api_key else None)
+            status_txt = "Response & Learning Complete."
+        except Exception as e_learn:
+            logger.error(f"Error during post-interaction learning: {e_learn}", exc_info=True)
+            status_txt = "Response complete. Error during learning."
+    else:
+        status_txt = final_bot_resp_acc or "Processing finished."
+
+    current_chat_session_history = [h for h in current_chat_session_history if h['content']]  # Clean up empty entries
+
+    updated_rules_text = ui_refresh_rules_display_fn()
+    updated_mems_json = ui_refresh_memories_display_fn()
+    yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn, updated_rules_text, updated_mems_json, latest_orchestrator_state)
+
+    if temp_dl_file_path and os.path.exists(temp_dl_file_path):
+        os.unlink(temp_dl_file_path)
+
+# --- Keep all your UI helper functions (ui_refresh_rules_display_fn, etc.) unchanged ---
+def ui_refresh_rules_display_fn():
+    return "\n\n---\n\n".join(get_all_rules_cached()) or "No rules found."
+
+def ui_download_rules_action_fn():
+    rules_content = "\n\n---\n\n".join(get_all_rules_cached())
+    if not rules_content.strip():
+        gr.Warning("No rules to download.")
+        return gr.DownloadButton(value=None, interactive=False, label="No Rules")
+    with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".txt", encoding='utf-8') as tmpfile:
+        tmpfile.write(rules_content)
+        return tmpfile.name
+
+def ui_upload_rules_action_fn(uploaded_file_obj, progress=gr.Progress()):
+    if not uploaded_file_obj: return "No file provided."
+    added, skipped, errors = load_rules_from_file(uploaded_file_obj.name, progress_callback=lambda p, d: progress(p, desc=d))
+    return f"Rules Upload: Added: {added}, Skipped (duplicates): {skipped}, Errors: {errors}."
+
+def ui_refresh_memories_display_fn():
+    return get_all_memories_cached() or []
+
+def ui_download_memories_action_fn():
+    memories = get_all_memories_cached()
+    if not memories:
+        gr.Warning("No memories to download.")
+        return gr.DownloadButton(value=None, interactive=False, label="No Memories")
+    jsonl_content = "\n".join([json.dumps(mem) for mem in memories])
+    with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".jsonl", encoding='utf-8') as tmpfile:
+        tmpfile.write(jsonl_content)
+        return tmpfile.name
+
+def ui_upload_memories_action_fn(uploaded_file_obj, progress=gr.Progress()):
+    if not uploaded_file_obj: return "No file provided."
+    added, format_err, save_err = load_memories_from_file(uploaded_file_obj.name, progress_callback=lambda p, d: progress(p, desc=d))
+    return f"Memories Upload: Added: {added}, Format Errors: {format_err}, Save Errors: {save_err}."
+
+def save_edited_rules_action_fn(edited_rules_text: str, progress=gr.Progress()):
+    if not edited_rules_text.strip(): return "No rules text to save."
+    potential_rules = edited_rules_text.split("\n\n---\n\n")
+    if len(potential_rules) == 1 and "\n" in edited_rules_text:
+        potential_rules = [r.strip() for r in edited_rules_text.splitlines() if r.strip()]
+    unique_rules = sorted(list(set(filter(None, [r.strip() for r in potential_rules]))))
+    if not unique_rules: return "No unique, non-empty rules found."
+    added, skipped, errors, total = 0, 0, 0, len(unique_rules)
+    progress(0, desc=f"Saving {total} unique rules...")
+    for idx, rule_text in enumerate(unique_rules):
+        success, status_msg = add_rule_entry(rule_text)
+        if success: added += 1
+        elif status_msg == "duplicate": skipped += 1
+        else: errors += 1
+        progress((idx + 1) / total, desc=f"Processed {idx+1}/{total} rules...")
+    return f"Editor Save: Added: {added}, Skipped (duplicates): {skipped}, Errors: {errors} from {total} unique rules."
+
+def app_load_fn():
+    logger.info("App loading. Initializing systems...")
+    initialize_memory_system()
+    rules_added, rules_skipped, rules_errors = load_rules_from_file(LOAD_RULES_FILE)
+    mems_added, mems_format_errors, mems_save_errors = load_memories_from_file(LOAD_MEMORIES_FILE)
+    status = f"Ready. Rules loaded: {rules_added}. Memories loaded: {mems_added}."
+    return (status, ui_refresh_rules_display_fn(), ui_refresh_memories_display_fn(), gr.Markdown(visible=False), gr.Textbox(value="*Waiting...*", interactive=True), gr.DownloadButton(interactive=False, visible=False))
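+# Note: LOAD_RULES_FILE / LOAD_MEMORIES_FILE may be unset (None); the loaders in
+# utils.py presumably treat a missing path as "nothing to load".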
+
+
+with gr.Blocks(theme=gr.themes.Soft(), css=".gr-button { margin: 5px; } .status-text { font-size: 0.9em; color: #555; }") as demo:
+    # This component holds the state of the orchestrator between runs
+    orchestrator_state = gr.State({})
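+    # Keys observed elsewhere in this file: 'is_complex_task' (bool) and
+    # 'original_goal' (str); the orchestrator may store more.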
+
+    gr.Markdown("# 🤖 AI Research Agent")
+    with gr.Row(variant="compact"):
+        agent_stat_tb = gr.Textbox(label="Agent Status", value="Initializing...", interactive=False, elem_classes=["status-text"], scale=4)
+        with gr.Column(scale=1, min_width=150):
+            memory_backend_info_tb = gr.Textbox(label="Memory Backend", value=MEMORY_STORAGE_BACKEND, interactive=False)
+            hf_repos_display = gr.Textbox(label="HF Repos", value=f"M: {MEMORY_HF_MEM_REPO}, R: {MEMORY_HF_RULES_REPO}", interactive=False, visible=MEMORY_STORAGE_BACKEND == "HF_DATASET")
+    with gr.Row():
+        with gr.Sidebar():
+            gr.Markdown("## ⚙️ Configuration")
+            with gr.Group():
+                api_key_tb = gr.Textbox(label="API Key (Override)", type="password", placeholder="Uses .env if blank")
+                available_providers = get_available_providers()
+                default_provider = available_providers[0] if available_providers else None
+                prov_sel_dd = gr.Dropdown(label="AI Provider", choices=available_providers, value=default_provider, interactive=True)
+                model_sel_dd = gr.Dropdown(label="AI Model", choices=get_model_display_names_for_provider(default_provider) if default_provider else [], value=get_default_model_display_name_for_provider(default_provider), interactive=True)
+                # New slider for controlling the agent's loop
+                turn_budget_slider = gr.Slider(label="Max Steps Per Turn (0=Continuous)", minimum=0, maximum=20, step=1, value=5, interactive=True)
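+                # The slider value is forwarded to orchestrate_and_respond as
+                # turn_budget; 0 presumably lets the agent run until it decides
+                # it is finished.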
+            with gr.Group():
+                sys_prompt_tb = gr.Textbox(label="System Prompt", lines=8, value=DEFAULT_SYSTEM_PROMPT, interactive=True)
+            if MEMORY_STORAGE_BACKEND == "RAM":
+                save_faiss_sidebar_btn = gr.Button("Save FAISS Indices", variant="secondary")
+        with gr.Column(scale=3):
+            with gr.Tabs():
+                with gr.TabItem("💬 Chat & Research"):
+                    main_chat_disp = gr.Chatbot(height=400, show_copy_button=True, render_markdown=True)
+                    with gr.Row(variant="compact"):
+                        user_msg_tb = gr.Textbox(show_label=False, placeholder="Ask your research question...", scale=7, lines=1)
+                        send_btn = gr.Button("Send / Continue", variant="primary", scale=1, min_width=120)
+                    with gr.Accordion("📝 Detailed Response & Insights", open=False):
+                        fmt_report_tb = gr.Textbox(label="Full AI Response", lines=8, interactive=True, show_copy_button=True)
+                        dl_report_btn = gr.DownloadButton("Download Report", value=None, interactive=False, visible=False)
+                        detect_out_md = gr.Markdown(visible=False)
+                with gr.TabItem("🧠 Knowledge Base"):
+                    with gr.Row(equal_height=True):
+                        with gr.Column():
+                            gr.Markdown("### 📜 Rules Management")
+                            rules_disp_ta = gr.TextArea(label="Current Rules", lines=10, interactive=True)
+                            save_edited_rules_btn = gr.Button("💾 Save Edited Text", variant="primary")
+                            with gr.Row(variant="compact"):
+                                dl_rules_btn = gr.DownloadButton("⬇️ Download Rules")
+                                clear_rules_btn = gr.Button("🗑️ Clear All Rules", variant="stop")
+                            upload_rules_fobj = gr.File(label="Upload Rules File (.txt/.jsonl)", file_types=[".txt", ".jsonl"])
+                            rules_stat_tb = gr.Textbox(label="Rules Status", interactive=False, lines=1)
+                        with gr.Column():
+                            gr.Markdown("### 📚 Memories Management")
+                            mems_disp_json = gr.JSON(label="Current Memories", value=[])
+                            with gr.Row(variant="compact"):
+                                dl_mems_btn = gr.DownloadButton("⬇️ Download Memories")
+                                clear_mems_btn = gr.Button("🗑️ Clear All Memories", variant="stop")
+                            upload_mems_fobj = gr.File(label="Upload Memories File (.json/.jsonl)", file_types=[".json", ".jsonl"])
+                            mems_stat_tb = gr.Textbox(label="Memories Status", interactive=False, lines=1)
+
+    prov_sel_dd.change(lambda p: gr.Dropdown(choices=get_model_display_names_for_provider(p), value=get_default_model_display_name_for_provider(p), interactive=True), prov_sel_dd, model_sel_dd)
+
+    # Update the chat event's inputs and outputs to include the new stateful components
+    chat_ins = [user_msg_tb, main_chat_disp, prov_sel_dd, model_sel_dd, api_key_tb, sys_prompt_tb, turn_budget_slider, orchestrator_state]
+    chat_outs = [user_msg_tb, main_chat_disp, agent_stat_tb, detect_out_md, fmt_report_tb, dl_report_btn, rules_disp_ta, mems_disp_json, orchestrator_state]

+    chat_event_args = {"fn": handle_gradio_chat_submit, "inputs": chat_ins, "outputs": chat_outs}
+    send_btn.click(**chat_event_args)
+    user_msg_tb.submit(**chat_event_args)
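+    # Because handle_gradio_chat_submit is a generator, Gradio streams each yield
+    # as an incremental UI update (demo.queue() at the bottom enables this).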
+
+    # Keep all other button clicks the same
+    dl_rules_btn.click(ui_download_rules_action_fn, None, dl_rules_btn)
+    save_edited_rules_btn.click(save_edited_rules_action_fn, [rules_disp_ta], [rules_stat_tb]).then(ui_refresh_rules_display_fn, outputs=rules_disp_ta)
+    upload_rules_fobj.upload(ui_upload_rules_action_fn, [upload_rules_fobj], [rules_stat_tb]).then(ui_refresh_rules_display_fn, outputs=rules_disp_ta)
+    clear_rules_btn.click(lambda: ("Cleared." if clear_all_rules_data_backend() else "Error."), outputs=rules_stat_tb).then(ui_refresh_rules_display_fn, outputs=rules_disp_ta)
+
+    dl_mems_btn.click(ui_download_memories_action_fn, None, dl_mems_btn)
+    upload_mems_fobj.upload(ui_upload_memories_action_fn, [upload_mems_fobj], [mems_stat_tb]).then(ui_refresh_memories_display_fn, outputs=mems_disp_json)
+    clear_mems_btn.click(lambda: ("Cleared." if clear_all_memory_data_backend() else "Error."), outputs=mems_stat_tb).then(ui_refresh_memories_display_fn, outputs=mems_disp_json)

+    if MEMORY_STORAGE_BACKEND == "RAM" and 'save_faiss_sidebar_btn' in locals():
+        save_faiss_sidebar_btn.click(lambda: (gr.Info("Saved FAISS to disk.") if save_faiss_indices_to_disk() is None else gr.Error("Error saving FAISS.")), None, None)

+    app_load_outputs = [agent_stat_tb, rules_disp_ta, mems_disp_json, detect_out_md, fmt_report_tb, dl_report_btn]
+    demo.load(fn=app_load_fn, inputs=None, outputs=app_load_outputs)

 if __name__ == "__main__":
+    app_port = int(os.getenv("GRADIO_PORT", 7860))
+    app_server = os.getenv("GRADIO_SERVER_NAME", "127.0.0.1")
+    logger.info(f"Launching Gradio server: http://{app_server}:{app_port}")
+    demo.queue().launch(server_name=app_server, server_port=app_port)