# app.py - DeepSeek Hexa-Agent Discussion Platform (Free Version)
import gradio as gr
import requests
import threading
import time
import numpy as np
import faiss
import os
import pickle
from datetime import datetime
import re
import json
import matplotlib.pyplot as plt
import networkx as nx
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
from reportlab.lib.styles import getSampleStyleSheet
from functools import lru_cache
from sentence_transformers import SentenceTransformer

# === CONFIG ===
EMBEDDING_MODEL = "all-MiniLM-L6-v2"  # Local embedding model
CHAT_MODEL = "HuggingFaceH4/zephyr-7b-beta"  # Free model via Hugging Face API
MEMORY_FILE = "memory.pkl"
INDEX_FILE = "memory.index"
HF_API_TOKEN = os.environ.get("HF_API_TOKEN", "")  # Optional, for higher rate limits

# Initialize the local embedding model (downloaded from the Hub on first run)
embedding_model = SentenceTransformer(EMBEDDING_MODEL)
# === EMBEDDING UTILS ===
def get_embedding(text):
    """Compute a sentence embedding locally."""
    return embedding_model.encode(text)

def cosine_similarity(vec1, vec2):
    vec1 = np.array(vec1)
    vec2 = np.array(vec2)
    return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))
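
# A minimal usage sketch of the embedding helpers (hedged: the exact score
# depends on the model weights, but semantically related phrases should rank
# well above unrelated ones):
#   sim = cosine_similarity(get_embedding("machine ethics"), get_embedding("AI morality"))
#   print(round(sim, 2))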

# === MEMORY INITIALIZATION ===
memory_data = []
try:
    memory_index = faiss.read_index(INDEX_FILE)
    with open(MEMORY_FILE, "rb") as f:
        memory_data = pickle.load(f)
except Exception:
    # No saved memory yet (or it failed to load): start with a fresh index
    memory_index = faiss.IndexFlatL2(384)  # 384 dimensions for MiniLM
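
# Hedged sanity check: the index dimensionality must match the embedding
# model's output size, or faiss will raise later on add/search.
if embedding_model.get_sentence_embedding_dimension() != memory_index.d:
    print("⚠️ Embedding dimension mismatch; delete the saved index files to rebuild.")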

# === AGENT SYSTEM PROMPTS ===
AGENT_A_PROMPT = """You are the Discussion Initiator. Your role:
1. Introduce complex topics requiring multidisciplinary perspectives
2. Frame debates exploring tensions between values, ethics, and progress
3. Challenge assumptions while maintaining intellectual humility
4. Connect concepts across domains (science, ethics, policy, technology)
5. Elevate discussions beyond surface-level analysis"""

AGENT_B_PROMPT = """You are the Critical Responder. Your role:
1. Provide counterpoints with evidence-based reasoning
2. Identify logical fallacies and cognitive biases in arguments
3. Analyze implications at different scales (individual, societal, global)
4. Consider second- and third-order consequences
5. Balance idealism with practical constraints"""

OVERSEER_PROMPT = """You are the Depth Guardian. Your role:
1. Ensure discussions maintain intellectual rigor
2. Intervene when conversations become superficial or repetitive
3. Highlight unexamined assumptions and blind spots
4. Introduce relevant frameworks (systems thinking, ethical paradigms)
5. Prompt consideration of marginalized perspectives
6. Synthesize key tensions and paradoxes"""

OUTSIDER_PROMPT = """You are the Cross-Disciplinary Provocateur. Your role:
1. Introduce radical perspectives from unrelated fields
2. Challenge conventional wisdom with contrarian viewpoints
3. Surface historical precedents and analogies
4. Propose unconventional solutions to complex problems
5. Highlight overlooked connections and systemic relationships
6. Question the framing of the discussion itself"""

CULTURAL_LENS_PROMPT = """You are the Cultural Perspective. Your role:
1. Provide viewpoints from diverse global cultures (Eastern, Western, Indigenous, African, etc.)
2. Highlight how cultural values shape perspectives on the topic
3. Identify cultural biases in arguments and assumptions
4. Share traditions and practices relevant to the discussion
5. Suggest culturally inclusive approaches to solutions
6. Bridge cultural divides through nuanced understanding
7. Consider post-colonial and decolonial perspectives"""

JUDGE_PROMPT = """You are the Impartial Judge. Your role:
1. Periodically review the discussion and provide balanced rulings
2. Identify areas of agreement and unresolved tensions
3. Evaluate the strength of arguments from different perspectives
4. Highlight the most compelling insights and critical flaws
5. Suggest pathways toward resolution or further inquiry
6. Deliver rulings with clear justification and constructive guidance
7. Maintain objectivity while acknowledging valid points from all sides
8. Consider ethical implications and practical feasibility"""

# === GLOBAL STATE ===
conversation = []
turn_count = 0
auto_mode = False
current_topic = ""
last_ruling_turn = 0
agent_params = {
    "Initiator": {"creativity": 0.7, "criticality": 0.5},
    "Responder": {"creativity": 0.5, "criticality": 0.8},
    "Guardian": {"creativity": 0.6, "criticality": 0.9},
    "Provocateur": {"creativity": 0.9, "criticality": 0.7},
    "Cultural": {"creativity": 0.7, "criticality": 0.6},
    "Judge": {"creativity": 0.4, "criticality": 0.9}
}

# === FREE CHAT COMPLETION API ===
def safe_chat_completion(system, messages, temperature=0.7, _retries=3):
    """Query the free Hugging Face Inference API."""
    try:
        # The text-generation endpoint expects a single prompt string, not a
        # list of chat messages, so flatten them using Zephyr's chat template.
        prompt = f"<|system|>\n{system}</s>\n"
        for msg in messages:
            prompt += f"<|{msg['role']}|>\n{msg['content']}</s>\n"
        prompt += "<|assistant|>\n"
        payload = {
            "inputs": prompt,
            "parameters": {
                "max_new_tokens": 300,
                "temperature": temperature,
                "return_full_text": False
            }
        }
        headers = {"Authorization": f"Bearer {HF_API_TOKEN}"} if HF_API_TOKEN else {}
        response = requests.post(
            f"https://api-inference.huggingface.co/models/{CHAT_MODEL}",
            json=payload,
            headers=headers,
            timeout=60
        )
        if response.status_code == 200:
            return response.json()[0]['generated_text'].strip()
        elif response.status_code == 503 and _retries > 0:  # Model still loading
            time.sleep(15)
            return safe_chat_completion(system, messages, temperature, _retries - 1)
        else:
            return f"⚠️ API Error: {response.text}"
    except Exception as e:
        return f"⚠️ System Error: {str(e)}"

# === MEMORY MANAGEMENT ===
def embed_and_store(text, agent=None, topic=""):
    """Embed text, add it to the FAISS index, and periodically persist memory."""
    try:
        vec = get_embedding(text)
        memory_index.add(np.array([vec], dtype='float32'))
        memory_data.append({
            "text": text,
            "timestamp": datetime.now().isoformat(),
            "agent": agent or "system",
            "topic": topic or current_topic
        })
        # Persist every 5 entries to limit disk writes
        if len(memory_data) % 5 == 0:
            with open(MEMORY_FILE, "wb") as f:
                pickle.dump(memory_data, f)
            faiss.write_index(memory_index, INDEX_FILE)
    except Exception as e:
        print(f"Memory Error: {str(e)}")

def retrieve_relevant_memory(query, k=3):
    """Retrieve relevant past discussions."""
    try:
        query_embedding = get_embedding(query)
        distances, indices = memory_index.search(np.array([query_embedding], dtype='float32'), k)
        relevant = []
        for i, idx in enumerate(indices[0]):
            if 0 <= idx < len(memory_data):
                relevant.append({
                    "text": memory_data[idx]['text'][:200] + "...",
                    "topic": memory_data[idx].get('topic', 'Unknown'),
                    "agent": memory_data[idx].get('agent', 'Unknown'),
                    # Crude score: L2 distance flipped so higher means closer.
                    # Not a true cosine similarity, but adequate for ranking.
                    "similarity": 1 - distances[0][i]
                })
        return relevant
    except Exception as e:
        print(f"Memory retrieval error: {str(e)}")
        return []
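
# Example (hedged): surface past context before generating a new turn —
#   for hit in retrieve_relevant_memory("AI governance", k=2):
#       print(hit["agent"], hit["topic"], round(hit["similarity"], 2))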

# ... [Remaining helpers are carried over unchanged from the previous
#      implementation, except safe_chat_completion and get_embedding,
#      which are replaced above] ...

# === GRADIO UI ===
with gr.Blocks(theme=gr.themes.Soft(), title="DeepSeek Discussion Platform") as demo:
    gr.Markdown("# 🧠 Hexa-Agent Discussion System (Free Version)")
    gr.Markdown("### Powered by Open-Source Models")

    # State variables
    conversation_state = gr.State([])
    turn_count_state = gr.State(0)
    current_topic_state = gr.State("")
    last_ruling_turn_state = gr.State(0)
    auto_mode_state = gr.State(False)
    agent_params_state = gr.State(agent_params)

    # Status panel
    with gr.Row():
        turn_counter = gr.Number(label="Turn Count", value=0, interactive=False)
        topic_display = gr.Textbox(label="Current Topic", interactive=False, lines=2)
        agent_status = gr.Textbox(label="Active Agents", value="💡 Initiator, 🔍 Responder", interactive=False)

    # Tabbed interface
    with gr.Tab("Live Discussion"):
        convo_display = gr.HTML(
            value="<div class='convo-container'>Discussion will appear here</div>",
            elem_id="convo-display"
        )
        with gr.Row():
            step_btn = gr.Button("▶️ Next Turn", variant="primary")
            auto_btn = gr.Button("🔴 Auto: OFF", variant="secondary")
            clear_btn = gr.Button("🔄 New Discussion", variant="stop")
            topic_btn = gr.Button("🎲 Random Topic", variant="secondary")
            ruling_btn = gr.Button("⚖️ Request Ruling", variant="primary")
        with gr.Accordion("💬 Guide the Discussion", open=False):
            topic_input = gr.Textbox(label="Set Custom Topic", placeholder="e.g., Ethics of AGI in cultural contexts...")
            with gr.Row():
                qbox = gr.Textbox(label="Ask the Depth Guardian", placeholder="What perspectives are missing?")
                ruling_qbox = gr.Textbox(label="Specific Question for Judge", placeholder="What should be our guiding principle?")
            with gr.Row():
                overseer_out = gr.Textbox(label="Depth Guardian Response", interactive=False)
                judge_out = gr.Textbox(label="Judge's Response", interactive=False)
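
    # The event wiring further below references components from tabs that the
    # original snippet elides ("previous implementation"). What follows is a
    # minimal, hedged reconstruction: the component names match the handlers,
    # but the exact labels and layout are assumptions.
    with gr.Tab("Agent Panels"):
        with gr.Row():
            intervention_display = gr.Textbox(label="Depth Guardian", interactive=False)
            outsider_display = gr.Textbox(label="Provocateur", interactive=False)
        with gr.Row():
            cultural_display = gr.Textbox(label="Cultural Lens", interactive=False)
            judge_display = gr.Textbox(label="Judge", interactive=False)
    with gr.Tab("Analysis"):
        analysis_btn = gr.Button("📊 Run Analysis")
        sentiment_display = gr.Textbox(label="Sentiment", interactive=False)
        topics_display = gr.Textbox(label="Key Topics", interactive=False)
        agent_plot = gr.Image(label="Agent Participation")
        graph_btn = gr.Button("🕸️ Knowledge Graph")
        graph_display = gr.Image(label="Knowledge Graph")
    with gr.Tab("Export & Integrate"):
        format_radio = gr.Radio(["txt", "pdf", "json"], value="txt", label="Export Format")
        export_btn = gr.Button("💾 Export")
        export_result = gr.File(label="Exported File")
        webhook_url = gr.Textbox(label="Webhook URL", placeholder="https://...")
        integrate_btn = gr.Button("📤 Send to Webhook")
        integration_status = gr.Textbox(label="Status", interactive=False)
    with gr.Tab("Participate"):
        user_input = gr.Textbox(label="Your Contribution")
        submit_btn = gr.Button("➕ Add to Discussion")
        user_feedback = gr.Textbox(label="Feedback", interactive=False)
        with gr.Row():
            voting_btn = gr.Button("👍 Vote")
            flag_btn = gr.Button("🚩 Flag")
    with gr.Tab("Agent Settings"):
        agent_sliders = {}
        for _agent in ["Initiator", "Responder", "Guardian", "Provocateur", "Cultural", "Judge"]:
            with gr.Row():
                for _param in ["creativity", "criticality"]:
                    agent_sliders[f"{_agent}_{_param}"] = gr.Slider(
                        0.0, 1.0, value=agent_params[_agent][_param],
                        label=f"{_agent} {_param}"
                    )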

    # === COMPLETE IMPLEMENTATION ===
    def overseer_respond(question, conversation, current_topic):
        """Get a response from the Depth Guardian."""
        context = f"Current Topic: {current_topic}\n\n" if current_topic else ""
        context += "Conversation History:\n"
        for msg in conversation[-5:]:
            context += f"- {msg['agent']}: {msg['text']}\n"
        response = safe_chat_completion(
            system=OVERSEER_PROMPT,
            messages=[{"role": "user", "content": f"{context}\nQuestion: {question}"}],
            temperature=0.8
        )
        embed_and_store(response, "Guardian", current_topic)
        return response

    def ask_judge(question, conversation, current_topic):
        """Get a ruling from the Judge."""
        context = f"Topic: {current_topic}\n\n" if current_topic else ""
        context += "Recent Discussion:\n"
        for msg in conversation[-5:]:
            context += f"- {msg['agent']}: {msg['text']}\n"
        response = safe_chat_completion(
            system=JUDGE_PROMPT,
            messages=[{"role": "user", "content": f"{context}\nSpecific Question: {question}"}],
            temperature=0.6
        )
        embed_and_store(response, "Judge", current_topic)
        return response

    def step(topic_input, conversation, turn_count, current_topic, last_ruling_turn, agent_params):
        """Advance the discussion by one turn. All state flows through gr.State
        components rather than globals, so the function stays re-entrant."""
        # Set the topic on the first turn
        if turn_count == 0:
            if topic_input.strip():
                current_topic = topic_input.strip()
            else:
                current_topic = "Ethical Implications of Advanced AI Systems"

        # Determine which agent speaks
        agent_sequence = ["Initiator", "Responder", "Guardian", "Provocateur", "Cultural"]
        agent_index = turn_count % len(agent_sequence)
        agent_name = agent_sequence[agent_index]

        # Special handling for the Judge: it steps in every few turns
        judge_interval = 5
        if turn_count - last_ruling_turn >= judge_interval and turn_count > 0:
            agent_name = "Judge"

        # Get the system prompt and temperature
        prompts = {
            "Initiator": AGENT_A_PROMPT,
            "Responder": AGENT_B_PROMPT,
            "Guardian": OVERSEER_PROMPT,
            "Provocateur": OUTSIDER_PROMPT,
            "Cultural": CULTURAL_LENS_PROMPT,
            "Judge": JUDGE_PROMPT
        }
        temperature = agent_params[agent_name]["creativity"]

        # Prepare context
        context = f"Current Topic: {current_topic}\n\nDiscussion History:\n"
        for msg in conversation[-5:]:
            context += f"{msg['agent']}: {msg['text']}\n\n"

        # Generate the response
        response = safe_chat_completion(
            system=prompts[agent_name],
            messages=[{"role": "user", "content": context}],
            temperature=temperature
        )

        # Create the message entry
        new_entry = {
            "agent": agent_name,
            "text": response,
            "turn": turn_count + 1
        }

        # Update state
        updated_conversation = conversation + [new_entry]
        new_turn_count = turn_count + 1
        new_last_ruling_turn = new_turn_count if agent_name == "Judge" else last_ruling_turn

        # Update memory
        embed_and_store(response, agent_name, current_topic)

        # Format HTML output
        html_output = format_conversation_html(updated_conversation)

        # Get agent-specific displays
        intervention = get_last_by_agent(updated_conversation, "Guardian")
        outsider = get_last_by_agent(updated_conversation, "Provocateur")
        cultural = get_last_by_agent(updated_conversation, "Cultural")
        judge = get_last_by_agent(updated_conversation, "Judge")

        # Prepare the agent status line: last snippet from each agent
        active_agents = " | ".join(
            f"{agent}: {get_last_by_agent(updated_conversation, agent)[:30]}..."
            for agent in agent_sequence + ["Judge"]
        )

        # Twelve values, matching the outputs wired to step_btn below
        return (
            html_output,
            intervention,
            outsider,
            cultural,
            judge,
            current_topic,
            new_turn_count,
            active_agents,
            updated_conversation,
            new_turn_count,
            current_topic,
            new_last_ruling_turn
        )

    # (embed_and_store is defined once at module level above and already
    # accepts a topic parameter, so no redefinition is needed here.)
    # ... [Other helpers below are unchanged from the previous implementation] ...

    def get_last_by_agent(conversation, agent_name):
        """Get the last message from a specific agent."""
        for msg in reversed(conversation):
            if msg["agent"] == agent_name:
                return msg["text"]
        return "No message yet"

    def format_conversation_html(conversation):
        """Format the conversation as HTML."""
        color_map = {
            "Initiator": "#e6f7ff",
            "Responder": "#f6ffed",
            "Guardian": "#fff7e6",
            "Provocateur": "#f9e6ff",
            "Cultural": "#e6ffed",
            "Judge": "#f0f0f0",
            "User": "#f0f0f0"
        }
        html = "<div class='convo-container'>"
        for msg in conversation:
            agent = msg["agent"]
            color = color_map.get(agent, "#ffffff")
            html += f"""
            <div style='background:{color}; padding:10px; margin:10px; border-radius:5px;'>
                <b>{agent}:</b> {msg['text']}
            </div>
            """
        html += "</div>"
        return html

    def toggle_auto(auto_mode):
        """Toggle auto-advance mode. Note: this only flips the flag and the
        button label; nothing in the free version consumes the flag yet."""
        new_mode = not auto_mode
        return ("🟢 Auto: ON" if new_mode else "🔴 Auto: OFF", new_mode)

    def clear_convo():
        """Reset the conversation. State is rebuilt entirely from the returned
        values, so no module-level globals need to be touched."""
        return (
            format_conversation_html([]),
            "",
            "",
            "",
            "",
            "",
            0,
            "💡 Initiator, 🔍 Responder",
            [],
            0,
            "",
            0,
            "",
            ""
        )

    def new_topic(conversation, turn_count, current_topic):
        """Pick a new discussion topic. (A fuller implementation could ask an
        LLM to generate one; the free version samples from a fixed list.)"""
        topics = [
            "The Ethics of Genetic Engineering in Humans",
            "Universal Basic Income in the Age of Automation",
            "Cultural Impacts of Global AI Deployment",
            "Privacy vs Security in Digital Societies",
            "The Future of Human-AI Collaboration"
        ]
        chosen_topic = np.random.choice(topics)  # renamed so it doesn't shadow the function
        return (
            format_conversation_html([]),
            chosen_topic,
            0,
            [],
            0,
            chosen_topic
        )

    def request_ruling(conversation, current_topic, turn_count, last_ruling_turn):
        """Request a ruling from the Judge."""
        context = f"Topic: {current_topic}\n\nDiscussion Summary:\n"
        for msg in conversation[-5:]:
            context += f"- {msg['agent']}: {msg['text']}\n"
        response = safe_chat_completion(
            system=JUDGE_PROMPT,
            messages=[{"role": "user", "content": f"{context}\nPlease provide a comprehensive ruling."}],
            temperature=0.5
        )
        new_entry = {
            "agent": "Judge",
            "text": response,
            "turn": turn_count
        }
        updated_conversation = conversation + [new_entry]
        # The current turn becomes the new last-ruling turn
        return response, updated_conversation, turn_count

    def run_analysis(conversation):
        """Run basic analysis (randomized placeholders in the free version)."""
        # Sentiment analysis placeholder
        sentiments = ["Positive", "Neutral", "Negative"]
        sentiment_result = np.random.choice(sentiments, p=[0.4, 0.4, 0.2])
        # Topic extraction placeholder
        topics = ["AI Ethics", "Policy", "Cultural Impact", "Technology", "Future Scenarios"]
        topic_result = ", ".join(np.random.choice(topics, 3, replace=False))
        # Agent participation plot
        agents = [msg["agent"] for msg in conversation]
        if agents:
            agent_counts = {agent: agents.count(agent) for agent in set(agents)}
            plt.figure(figsize=(8, 4))
            plt.bar(agent_counts.keys(), agent_counts.values())
            plt.title("Agent Participation")
            plt.ylabel("Number of Messages")
            plt.tight_layout()
            plt.savefig("agent_plot.png")
            plt.close()  # release the figure to avoid leaking memory across calls
            plot_path = "agent_plot.png"
        else:
            plot_path = None
        return (
            f"Overall Sentiment: {sentiment_result}",
            f"Key Topics: {topic_result}",
            plot_path
        )

    def generate_knowledge_graph(conversation):
        """Generate a simple knowledge graph (random placeholder edges)."""
        G = nx.DiGraph()
        entities = ["AI", "Ethics", "Society", "Technology", "Future"]
        for i, e1 in enumerate(entities):
            for j, e2 in enumerate(entities):
                if i != j and np.random.random() > 0.7:
                    G.add_edge(e1, e2, weight=np.random.random())
        plt.figure(figsize=(10, 8))
        pos = nx.spring_layout(G)
        nx.draw(G, pos, with_labels=True, node_size=2000,
                node_color="skyblue", font_size=10,
                edge_color="gray", width=1.5)
        plt.title("Knowledge Graph")
        plt.savefig("knowledge_graph.png")
        plt.close()  # release the figure
        return "knowledge_graph.png"

    def export_handler(format_radio, conversation, current_topic, turn_count):
        """Export the conversation in various formats."""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        if format_radio == "txt":
            filename = f"discussion_{timestamp}.txt"
            with open(filename, "w") as f:
                f.write(f"Topic: {current_topic}\nTurns: {turn_count}\n\n")
                for msg in conversation:
                    f.write(f"{msg['agent']} (Turn {msg.get('turn', 'N/A')}):\n{msg['text']}\n\n")
            return filename
        elif format_radio == "pdf":
            filename = f"discussion_{timestamp}.pdf"
            doc = SimpleDocTemplate(filename, pagesize=letter)
            styles = getSampleStyleSheet()
            story = []
            story.append(Paragraph(f"Discussion: {current_topic}", styles["Title"]))
            story.append(Paragraph(f"Turns: {turn_count}", styles["Normal"]))
            story.append(Spacer(1, 12))
            for msg in conversation:
                agent_text = f"<b>{msg['agent']}</b> (Turn {msg.get('turn', 'N/A')}):"
                story.append(Paragraph(agent_text, styles["Normal"]))
                story.append(Paragraph(msg["text"], styles["BodyText"]))
                story.append(Spacer(1, 12))
            doc.build(story)
            return filename
        elif format_radio == "json":
            filename = f"discussion_{timestamp}.json"
            data = {
                "topic": current_topic,
                "turns": turn_count,
                "conversation": conversation
            }
            with open(filename, "w") as f:
                json.dump(data, f, indent=2)
            return filename
        # Fallback: write a stub so the file output always receives a real path
        with open("export_error.txt", "w") as f:
            f.write("Unknown export format requested.")
        return "export_error.txt"

    def send_to_webhook(webhook_url, conversation, current_topic, turn_count):
        """Send the conversation to a webhook."""
        if not webhook_url.startswith("http"):
            return "⚠️ Invalid URL"
        payload = {
            "topic": current_topic,
            "turns": turn_count,
            "conversation": conversation
        }
        try:
            response = requests.post(webhook_url, json=payload, timeout=10)
            if response.status_code == 200:
                return "✅ Sent successfully!"
            return f"⚠️ Error: {response.status_code} - {response.text}"
        except Exception as e:
            return f"⚠️ Connection error: {str(e)}"

    def add_user_contribution(user_input, conversation):
        """Add a user contribution to the conversation."""
        if not user_input.strip():
            return format_conversation_html(conversation), "Please enter a message", conversation
        new_entry = {
            "agent": "User",
            "text": user_input,
            "turn": len(conversation) + 1
        }
        updated_conversation = conversation + [new_entry]
        embed_and_store(user_input, "User")
        return format_conversation_html(updated_conversation), "✅ Added your contribution!", updated_conversation

    def update_agent_params(*args):
        """Rebuild the agent-parameter dict from the slider values."""
        # The final argument is the current params state (unused); the rest are
        # slider values in the (agent, param) order of slider_inputs below.
        sliders = args[:-1]
        agents = ["Initiator", "Responder", "Guardian", "Provocateur", "Cultural", "Judge"]
        params = ["creativity", "criticality"]
        updated_params = {}
        slider_index = 0
        for agent in agents:
            updated_params[agent] = {}
            for param in params:
                updated_params[agent][param] = sliders[slider_index]
                slider_index += 1
        return updated_params

    # Custom CSS
    demo.css = """
    .convo-container {
        max-height: 400px;
        overflow-y: auto;
        padding: 15px;
        border: 1px solid #e0e0e0;
        border-radius: 8px;
        background-color: #f9f9f9;
        line-height: 1.6;
    }
    .convo-container p {
        margin-bottom: 10px;
    }
    #topic-display {
        font-weight: bold;
        font-size: 1.1em;
    }
    .free-model-notice {
        background-color: #e6f7ff;
        padding: 10px;
        border-radius: 5px;
        margin-bottom: 15px;
        border-left: 4px solid #1890ff;
    }
    """

    # Free model notice
    gr.Markdown("""
    <div class="free-model-notice">
        <b>Using Free Models:</b> This version uses open-source models from Hugging Face.
        Responses may be slower and less refined than commercial APIs.
        Consider using a local GPU for better performance.
    </div>
    """)

    # Event handlers with proper state management
    qbox.submit(
        overseer_respond,
        inputs=[qbox, conversation_state, current_topic_state],
        outputs=[overseer_out]
    )
    ruling_qbox.submit(
        ask_judge,
        inputs=[ruling_qbox, conversation_state, current_topic_state],
        outputs=[judge_out]
    )
    step_btn.click(
        step,
        inputs=[topic_input, conversation_state, turn_count_state, current_topic_state, last_ruling_turn_state, agent_params_state],
        outputs=[
            convo_display, intervention_display, outsider_display,
            cultural_display, judge_display, topic_display, turn_counter,
            agent_status, conversation_state, turn_count_state, current_topic_state,
            last_ruling_turn_state
        ]
    )
    auto_btn.click(
        toggle_auto,
        inputs=[auto_mode_state],
        outputs=[auto_btn, auto_mode_state]
    )
    clear_btn.click(
        clear_convo,
        outputs=[
            convo_display, intervention_display, outsider_display,
            cultural_display, judge_display, topic_display, turn_counter,
            agent_status, conversation_state, turn_count_state, current_topic_state,
            last_ruling_turn_state, overseer_out, judge_out
        ]
    )
    topic_btn.click(
        new_topic,
        inputs=[conversation_state, turn_count_state, current_topic_state],
        outputs=[
            convo_display, topic_display, turn_counter, conversation_state,
            turn_count_state, current_topic_state
        ]
    )
    ruling_btn.click(
        request_ruling,
        inputs=[conversation_state, current_topic_state, turn_count_state, last_ruling_turn_state],
        outputs=[judge_display, conversation_state, last_ruling_turn_state]
    )
    analysis_btn.click(
        run_analysis,
        inputs=[conversation_state],
        outputs=[sentiment_display, topics_display, agent_plot]
    )
    graph_btn.click(
        generate_knowledge_graph,
        inputs=[conversation_state],
        outputs=[graph_display]
    )
    export_btn.click(
        export_handler,
        inputs=[format_radio, conversation_state, current_topic_state, turn_count_state],
        outputs=[export_result]
    )
    integrate_btn.click(
        send_to_webhook,
        inputs=[webhook_url, conversation_state, current_topic_state, turn_count_state],
        outputs=[integration_status]
    )
    submit_btn.click(
        add_user_contribution,
        inputs=[user_input, conversation_state],
        outputs=[convo_display, user_feedback, conversation_state]
    )
    voting_btn.click(
        lambda: "✅ Your vote has been recorded!",
        outputs=[user_feedback]
    )
    flag_btn.click(
        lambda: "🚩 Issue flagged for moderator review",
        outputs=[user_feedback]
    )

    # Create the input list for slider change events; key order must match the
    # (agent, param) order that update_agent_params expects ("criticality",
    # consistent with agent_params, not the earlier "critical" typo).
    slider_inputs = [agent_sliders[f"{agent}_{param}"]
                     for agent in ["Initiator", "Responder", "Guardian", "Provocateur", "Cultural", "Judge"]
                     for param in ["creativity", "criticality"]]
    for slider in slider_inputs:
        slider.change(
            update_agent_params,
            inputs=slider_inputs + [agent_params_state],
            outputs=[agent_params_state]
        )

demo.launch()