# app.py - Advanced Discussion Simulator with Hexa-Agent System
import gradio as gr
import openai
import threading
import time
import numpy as np
import faiss
import os
import pickle
from datetime import datetime
import re

# === CONFIG ===
EMBEDDING_MODEL = "text-embedding-3-small"
CHAT_MODEL = "gpt-4o"
MEMORY_FILE = "memory.pkl"
INDEX_FILE = "memory.index"
openai.api_key = os.environ.get("OPENAI_API_KEY")
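# NOTE: the API key is read from the environment (e.g. a Space secret); if
# OPENAI_API_KEY is unset, the OpenAI calls below will fail at request time and
# surface as the bracketed error strings returned by the helpers.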
# === EMBEDDING UTILS ===
def get_embedding(text, model=EMBEDDING_MODEL):
    text = text.replace("\n", " ")
    try:
        response = openai.embeddings.create(input=[text], model=model)
        return response.data[0].embedding
    except AttributeError:
        response = openai.Embedding.create(input=[text], model=model)
        return response['data'][0]['embedding']
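# NOTE: get_embedding tries the openai>=1.0 client attribute first and falls back
# to the legacy openai<1.0 Embedding endpoint when that attribute is missing.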
def cosine_similarity(vec1, vec2):
    vec1 = np.array(vec1)
    vec2 = np.array(vec2)
    return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))
# === MEMORY INITIALIZATION ===
memory_data = []
try:
    memory_index = faiss.read_index(INDEX_FILE)
    with open(MEMORY_FILE, "rb") as f:
        memory_data = pickle.load(f)
except Exception:
    memory_index = faiss.IndexFlatL2(1536)
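# NOTE: 1536 is the output dimension of text-embedding-3-small; if EMBEDDING_MODEL
# changes, the index dimension must be updated to match or index.add() will fail.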
# === AGENT SYSTEM PROMPTS ===
AGENT_A_PROMPT = """You are the Discussion Initiator. Your role:
1. Introduce complex topics requiring multidisciplinary perspectives
2. Frame debates exploring tensions between values, ethics, and progress
3. Challenge assumptions while maintaining intellectual humility
4. Connect concepts across domains (science, ethics, policy, technology)
5. Elevate discussions beyond surface-level analysis"""

AGENT_B_PROMPT = """You are the Critical Responder. Your role:
1. Provide counterpoints with evidence-based reasoning
2. Identify logical fallacies and cognitive biases in arguments
3. Analyze implications at different scales (individual, societal, global)
4. Consider second and third-order consequences
5. Balance idealism with practical constraints"""

OVERSEER_PROMPT = """You are the Depth Guardian. Your role:
1. Ensure discussions maintain intellectual rigor
2. Intervene when conversations become superficial or repetitive
3. Highlight unexamined assumptions and blind spots
4. Introduce relevant frameworks (systems thinking, ethical paradigms)
5. Prompt consideration of marginalized perspectives
6. Synthesize key tensions and paradoxes"""

OUTSIDER_PROMPT = """You are the Cross-Disciplinary Provocateur. Your role:
1. Introduce radical perspectives from unrelated fields
2. Challenge conventional wisdom with contrarian viewpoints
3. Surface historical precedents and analogies
4. Propose unconventional solutions to complex problems
5. Highlight overlooked connections and systemic relationships
6. Question the framing of the discussion itself"""

CULTURAL_LENS_PROMPT = """You are the Cultural Perspective. Your role:
1. Provide viewpoints from diverse global cultures (Eastern, Western, Indigenous, African, etc.)
2. Highlight how cultural values shape perspectives on the topic
3. Identify cultural biases in arguments and assumptions
4. Share traditions and practices relevant to the discussion
5. Suggest culturally inclusive approaches to solutions
6. Bridge cultural divides through nuanced understanding
7. Consider post-colonial and decolonial perspectives"""

JUDGE_PROMPT = """You are the Impartial Judge. Your role:
1. Periodically review the discussion and provide balanced rulings
2. Identify areas of agreement and unresolved tensions
3. Evaluate the strength of arguments from different perspectives
4. Highlight the most compelling insights and critical flaws
5. Suggest pathways toward resolution or further inquiry
6. Deliver rulings with clear justification and constructive guidance
7. Maintain objectivity while acknowledging valid points from all sides
8. Consider ethical implications and practical feasibility"""
# === GLOBAL STATE ===
conversation = []
turn_count = 0
auto_mode = False
current_topic = ""
last_ruling_turn = 0
# === CHAT COMPLETION ===
def chat_completion(system, messages, model=CHAT_MODEL):
    try:
        full_messages = [{"role": "system", "content": system}]
        full_messages.extend(messages)
        try:
            # openai>=1.0 style client
            response = openai.chat.completions.create(
                model=model,
                messages=full_messages,
                temperature=0.75,
                max_tokens=300
            )
            return response.choices[0].message.content.strip()
        except AttributeError:
            # Fallback for the legacy openai<1.0 SDK
            response = openai.ChatCompletion.create(
                model=model,
                messages=full_messages,
                temperature=0.75,
                max_tokens=300
            )
            return response['choices'][0]['message']['content'].strip()
    except Exception as e:
        return f"[API Error: {str(e)}]"
# === MEMORY MANAGEMENT ===
def embed_and_store(text, agent=None):
    try:
        vec = get_embedding(text)
        memory_index.add(np.array([vec], dtype='float32'))
        memory_data.append({
            "text": text,
            "timestamp": datetime.now().isoformat(),
            "agent": agent or "system"
        })
        if len(memory_data) % 5 == 0:
            with open(MEMORY_FILE, "wb") as f:
                pickle.dump(memory_data, f)
            faiss.write_index(memory_index, INDEX_FILE)
    except Exception as e:
        print(f"Memory Error: {str(e)}")
# === CONVERSATION UTILITIES ===
def format_convo():
    # Blank line between messages so each turn renders as its own Markdown paragraph
    return "\n\n".join([f"**{m['agent']}**: {m['text']}" for m in conversation])
def detect_superficiality():
    """Detect shallow arguments using linguistic analysis"""
    if len(conversation) < 3:
        return False
    last_texts = [m['text'] for m in conversation[-3:]]
    # Linguistic markers of superficiality
    superficial_indicators = [
        r"\b(obviously|clearly|everyone knows)\b",
        r"\b(simply|just|merely)\b",
        r"\b(always|never)\b",
        r"\b(I (think|believe|feel))\b",
        r"\b(without question|undeniably)\b"
    ]
    # Argument depth markers
    depth_markers = [
        r"\b(however|conversely|paradoxically)\b",
        r"\b(evidence suggests|studies indicate)\b",
        r"\b(complex interplay|multifaceted nature)\b",
        r"\b(trade-off|tension between)\b",
        r"\b(historical precedent|comparative analysis)\b"
    ]
    superficial_count = 0
    depth_count = 0
    for text in last_texts:
        for pattern in superficial_indicators:
            if re.search(pattern, text, re.IGNORECASE):
                superficial_count += 1
        for pattern in depth_markers:
            if re.search(pattern, text, re.IGNORECASE):
                depth_count += 1
    return superficial_count > depth_count * 2
def detect_repetition():
    """Check if recent messages are conceptually similar"""
    if len(conversation) < 4:
        return False
    # Compare the newest message with the one two turns earlier
    recent = [m['text'] for m in conversation[-4:]]
    similarity = cosine_similarity(get_embedding(recent[-1]), get_embedding(recent[-3]))
    return similarity > 0.82
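# NOTE: each repetition check costs two embedding API calls; the 0.82 cosine
# threshold is a heuristic and may need tuning if the embedding model changes.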
def detect_cultural_relevance():
    """Check if cultural perspectives are needed"""
    if len(conversation) < 2:
        return False
    last_texts = " ".join([m['text'] for m in conversation[-2:]])
    cultural_triggers = [
        "society", "culture", "values", "tradition",
        "global", "western", "eastern", "indigenous",
        "community", "norms", "beliefs", "diversity",
        "equity", "identity", "heritage", "colonial"
    ]
    for trigger in cultural_triggers:
        if trigger in last_texts.lower():
            return True
    return False

def detect_judgment_opportunity():
    """Identify when the discussion is ripe for judgment"""
    if len(conversation) < 8:
        return False
    # Check for unresolved tensions
    last_texts = " ".join([m['text'] for m in conversation[-4:]])
    judgment_triggers = [
        "tension", "dilemma", "paradox", "conflict",
        "disagreement", "opposing views", "unresolved",
        "contradiction", "impasse", "standoff"
    ]
    for trigger in judgment_triggers:
        if trigger in last_texts.lower():
            return True
    return False
# === AGENT FUNCTIONS ===
def generate_topic():
    """Generate a complex discussion topic"""
    topic = chat_completion(
        "Generate a complex discussion topic requiring multidisciplinary and multicultural analysis",
        [{"role": "user", "content": "Create a topic addressing tensions between technological progress, ethics, and cultural values"}]
    )
    return topic.split(":")[-1].strip() if ":" in topic else topic

def outsider_comment():
    """Generate outsider perspective"""
    context = "\n".join([f"{m['agent']}: {m['text']}" for m in conversation[-4:]])
    prompt = f"Conversation Context:\n{context}\n\nProvide your cross-disciplinary perspective:"
    return chat_completion(OUTSIDER_PROMPT, [{"role": "user", "content": prompt}])

def cultural_perspective():
    """Generate cultural diversity perspective"""
    context = "\n".join([f"{m['agent']}: {m['text']}" for m in conversation[-4:]])
    prompt = f"Conversation Context:\n{context}\n\nProvide diverse cultural perspectives on this topic:"
    return chat_completion(CULTURAL_LENS_PROMPT, [{"role": "user", "content": prompt}])
def judge_ruling():
    """Generate final judgment or ruling"""
    global last_ruling_turn
    # Create comprehensive context
    context = "\n\n".join([
        f"Discussion Topic: {current_topic}",
        "Key Arguments:",
        *[f"- {m['agent']}: {m['text']}" for m in conversation[-8:]]
    ])
    prompt = f"""After reviewing this discussion, provide your impartial judgment:
{context}
Your ruling should:
1. Identify areas of agreement and unresolved tensions
2. Evaluate the strength of key arguments
3. Highlight the most compelling insights
4. Suggest pathways toward resolution
5. Consider ethical and practical implications
6. Provide constructive guidance for next steps"""
    ruling = chat_completion(JUDGE_PROMPT, [{"role": "user", "content": prompt}])
    last_ruling_turn = turn_count
    return ruling
# === CORE CONVERSATION FLOW ===
def step(topic_input=""):
    global conversation, turn_count, current_topic, last_ruling_turn
    # Initialize new discussion
    if not conversation:
        current_topic = topic_input or generate_topic()
        msg = chat_completion(
            AGENT_A_PROMPT,
            [{"role": "user", "content": f"Initiate a deep discussion on: {current_topic}"}]
        )
        conversation.append({"agent": "💡 Initiator", "text": msg})
        embed_and_store(msg, "Initiator")
        turn_count = 1
        last_ruling_turn = 0
        return format_convo(), "", "", "", "", current_topic
    # Critical Responder engages
    last_msg = conversation[-1]['text']
    b_msg = chat_completion(
        AGENT_B_PROMPT,
        [{"role": "user", "content": f"Topic: {current_topic}\n\nLast statement: {last_msg}"}]
    )
    conversation.append({"agent": "🔍 Responder", "text": b_msg})
    embed_and_store(b_msg, "Responder")
    # Initiator counters
    a_msg = chat_completion(
        AGENT_A_PROMPT,
        [{"role": "user", "content": f"Topic: {current_topic}\n\nCritical response: {b_msg}"}]
    )
    conversation.append({"agent": "💡 Initiator", "text": a_msg})
    embed_and_store(a_msg, "Initiator")
    # Overseer intervention
    intervention = ""
    if turn_count % 3 == 0 or detect_repetition() or detect_superficiality():
        context = "\n".join([m['text'] for m in conversation[-4:]])
        prompt = f"Topic: {current_topic}\n\nDiscussion Context:\n{context}\n\nDeepen the analysis:"
        intervention = chat_completion(OVERSEER_PROMPT, [{"role": "user", "content": prompt}])
        conversation.append({"agent": "⚙️ Depth Guardian", "text": intervention})
        embed_and_store(intervention, "Overseer")
    # Outsider commentary
    outsider_msg = ""
    if turn_count % 4 == 0 or "paradox" in last_msg.lower():
        outsider_msg = outsider_comment()
        conversation.append({"agent": "🌐 Provocateur", "text": outsider_msg})
        embed_and_store(outsider_msg, "Outsider")
    # Cultural perspective
    cultural_msg = ""
    if turn_count % 5 == 0 or detect_cultural_relevance():
        cultural_msg = cultural_perspective()
        conversation.append({"agent": "🌍 Cultural Lens", "text": cultural_msg})
        embed_and_store(cultural_msg, "Cultural")
    # Judge ruling
    judge_msg = ""
    ruling_interval = 6  # Turns between rulings
    if (turn_count - last_ruling_turn >= ruling_interval and
            (turn_count % ruling_interval == 0 or detect_judgment_opportunity())):
        judge_msg = judge_ruling()
        conversation.append({"agent": "⚖️ Judge", "text": judge_msg})
        embed_and_store(judge_msg, "Judge")
    turn_count += 1
    return format_convo(), intervention, outsider_msg, cultural_msg, judge_msg, current_topic
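# NOTE: step() always returns six values in the same order as the Gradio outputs
# wired below (conversation, overseer, outsider, cultural, judge, topic); the
# optional agents return empty strings on turns where they do not speak.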
# === OVERSEER QUERY HANDLER ===
def overseer_respond(query):
    try:
        context = "\n".join([m['text'] for m in conversation[-3:]]) if conversation else "No context"
        messages = [{"role": "user", "content": f"Discussion Topic: {current_topic}\n\nRecent context:\n{context}\n\nQuery: {query}"}]
        return chat_completion(OVERSEER_PROMPT, messages)
    except Exception as e:
        return f"[Overseer Error: {str(e)}]"

# === JUDGE RULING HANDLER ===
def request_ruling():
    try:
        ruling = judge_ruling()
        conversation.append({"agent": "⚖️ Judge", "text": ruling})
        embed_and_store(ruling, "Judge")
        return ruling
    except Exception as e:
        return f"[Judge Error: {str(e)}]"
# === AUTO MODE HANDLER ===
def auto_loop():
    global auto_mode
    while auto_mode:
        step()
        time.sleep(6)
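# NOTE: the auto loop runs step() in a daemon thread and discards its return value,
# so new turns accumulate in the global conversation but are not pushed to the
# browser; the display refreshes the next time a button or textbox event fires.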
def toggle_auto():
    global auto_mode
    auto_mode = not auto_mode
    if auto_mode:
        threading.Thread(target=auto_loop, daemon=True).start()
    return "🟢 Auto: ON" if auto_mode else "🔴 Auto: OFF"
# === GRADIO UI ===
# Custom CSS for the scrollable conversation panel (passed to the Blocks constructor)
CUSTOM_CSS = """
.convo-scroll {
    max-height: 400px;
    overflow-y: auto;
    padding: 10px;
    border: 1px solid #e0e0e0;
    border-radius: 5px;
}
"""

with gr.Blocks(title="Advanced Discussion Simulator", css=CUSTOM_CSS) as demo:
    gr.Markdown("# 🧠 Advanced Discussion Simulator")
    gr.Markdown("### Hexa-Agent System for Complex Discourse")
    with gr.Row():
        topic_display = gr.Textbox(label="Current Topic", interactive=False)
    with gr.Row():
        convo_display = gr.Markdown(
            value="**Discussion will appear here**",
            elem_id="convo-display",
            elem_classes="convo-scroll"
        )
    with gr.Row():
        step_btn = gr.Button("▶️ Next Turn", variant="primary")
        auto_btn = gr.Button("🔴 Auto: OFF", variant="secondary")
        clear_btn = gr.Button("🔄 New Discussion", variant="stop")
        topic_btn = gr.Button("🎲 Random Topic", variant="secondary")
        ruling_btn = gr.Button("⚖️ Request Ruling", variant="primary")
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### ⚙️ Depth Guardian")
            intervention_display = gr.Textbox(label="", interactive=False)
        with gr.Column(scale=1):
            gr.Markdown("### 🌐 Cross-Disciplinary")
            outsider_display = gr.Textbox(label="", interactive=False)
        with gr.Column(scale=1):
            gr.Markdown("### 🌍 Cultural Lens")
            cultural_display = gr.Textbox(label="", interactive=False)
    with gr.Row():
        with gr.Column(scale=3):
            gr.Markdown("### ⚖️ Final Judgment")
            judge_display = gr.Textbox(label="", interactive=False, lines=4)
    with gr.Accordion("💬 Guide the Discussion", open=False):
        topic_input = gr.Textbox(label="Set Custom Topic", placeholder="e.g., Ethics of AGI in cultural contexts...")
        with gr.Row():
            qbox = gr.Textbox(label="Ask the Depth Guardian", placeholder="What perspectives are missing?")
            ruling_qbox = gr.Textbox(label="Specific Question for Judge", placeholder="What should be our guiding principle?")
        with gr.Row():
            overseer_out = gr.Textbox(label="Depth Guardian Response", interactive=False)
            judge_out = gr.Textbox(label="Judge's Response", interactive=False)
    # Event handlers
    def clear_convo():
        global conversation, turn_count, current_topic, last_ruling_turn
        conversation = []
        turn_count = 0
        current_topic = ""
        last_ruling_turn = 0
        return "**New discussion started**", "", "", "", "", "", ""

    def new_topic():
        clear_convo()
        topic = generate_topic()
        return "**New discussion started**", "", "", "", "", topic, ""

    def ask_judge(query):
        try:
            context = "\n".join([m['text'] for m in conversation[-3:]]) if conversation else "No context"
            messages = [{"role": "user", "content": f"Discussion Topic: {current_topic}\n\nRecent context:\n{context}\n\nSpecific Question: {query}"}]
            return chat_completion(JUDGE_PROMPT, messages)
        except Exception as e:
            return f"[Judge Error: {str(e)}]"
    step_btn.click(
        step,
        inputs=[topic_input],
        outputs=[convo_display, intervention_display, outsider_display, cultural_display, judge_display, topic_display]
    )
    qbox.submit(overseer_respond, inputs=qbox, outputs=overseer_out)
    ruling_qbox.submit(ask_judge, inputs=ruling_qbox, outputs=judge_out)
    auto_btn.click(toggle_auto, outputs=auto_btn)
    clear_btn.click(
        clear_convo,
        outputs=[convo_display, intervention_display, outsider_display, cultural_display, judge_display, topic_display, overseer_out]
    )
    topic_btn.click(
        new_topic,
        outputs=[convo_display, intervention_display, outsider_display, cultural_display, judge_display, topic_display, overseer_out]
    )
    ruling_btn.click(
        request_ruling,
        outputs=[judge_display]
    )

demo.launch()