Spaces:
Sleeping
Sleeping
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,347 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# app.py - DeepSeek Hexa-Agent Discussion Platform (Free Version)
|
2 |
+
import gradio as gr
|
3 |
+
import requests
|
4 |
+
import threading
|
5 |
+
import time
|
6 |
+
import numpy as np
|
7 |
+
import faiss
|
8 |
+
import os
|
9 |
+
import pickle
|
10 |
+
from datetime import datetime
|
11 |
+
import re
|
12 |
+
import json
|
13 |
+
import matplotlib.pyplot as plt
|
14 |
+
import networkx as nx
|
15 |
+
from reportlab.lib.pagesizes import letter
|
16 |
+
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
|
17 |
+
from reportlab.lib.styles import getSampleStyleSheet
|
18 |
+
from functools import lru_cache
|
19 |
+
from sentence_transformers import SentenceTransformer
|
20 |
+
|
21 |
+
# === CONFIG ===
# Sentence-transformers model used for all local embeddings.
# NOTE(review): the FAISS index below is built with dimension 384, which
# matches all-MiniLM-L6-v2 — keep these two in sync if the model changes.
EMBEDDING_MODEL = "all-MiniLM-L6-v2"  # Local embedding model
# Chat model queried through the free Hugging Face Inference API.
CHAT_MODEL = "HuggingFaceH4/zephyr-7b-beta"  # Free model via Hugging Face API
# On-disk persistence: pickled memory records + serialized FAISS index.
MEMORY_FILE = "memory.pkl"
INDEX_FILE = "memory.index"
HF_API_TOKEN = os.environ.get("HF_API_TOKEN", "")  # Optional for higher rate limits

# Initialize local embedding model
# NOTE(review): loads (and on first run downloads) the model at import time,
# so startup can be slow; confirm this is acceptable for the deployment.
embedding_model = SentenceTransformer(EMBEDDING_MODEL)
|
30 |
+
|
31 |
+
# === EMBEDDING UTILS ===
|
32 |
+
@lru_cache(maxsize=500)
def get_embedding(text):
    """Compute the local sentence embedding for *text*, memoizing results.

    The LRU cache (500 entries) avoids re-encoding repeated texts; the
    underlying model is the module-level SentenceTransformer instance.
    """
    vector = embedding_model.encode(text)
    return vector
|
36 |
+
|
37 |
+
def cosine_similarity(vec1, vec2):
    """Return the cosine similarity of two vectors.

    Parameters
    ----------
    vec1, vec2 : array-like
        Any sequences convertible by ``np.asarray``.

    Returns
    -------
    float
        Cosine similarity in [-1.0, 1.0]. Returns 0.0 when either vector
        has zero norm — the previous implementation divided by zero there,
        emitting a runtime warning and returning ``nan``.
    """
    vec1 = np.asarray(vec1, dtype=float)
    vec2 = np.asarray(vec2, dtype=float)
    norm_product = np.linalg.norm(vec1) * np.linalg.norm(vec2)
    if norm_product == 0.0:
        # Zero vector carries no direction; define similarity as 0.
        return 0.0
    return float(np.dot(vec1, vec2) / norm_product)
|
41 |
+
|
42 |
+
# === MEMORY INITIALIZATION ===
# Load persisted discussion memory (FAISS vector index + pickled records).
# If either file is missing or unreadable we start with a fresh, empty index.
memory_data = []
try:
    memory_index = faiss.read_index(INDEX_FILE)
    with open(MEMORY_FILE, "rb") as f:
        memory_data = pickle.load(f)
except Exception:
    # Narrowed from a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit and made Ctrl-C during startup unreliable.
    # 384 = embedding dimension of the MiniLM model configured above.
    memory_index = faiss.IndexFlatL2(384)
|
50 |
+
|
51 |
+
# === AGENT SYSTEM PROMPTS ===
# Six fixed personas for the multi-agent debate. Each string is sent verbatim
# as the model's system prompt, so the wording below is runtime behavior —
# edit the text only to change how the agents act.

# Opens the discussion and frames the debate.
AGENT_A_PROMPT = """You are the Discussion Initiator. Your role:
1. Introduce complex topics requiring multidisciplinary perspectives
2. Frame debates exploring tensions between values, ethics, and progress
3. Challenge assumptions while maintaining intellectual humility
4. Connect concepts across domains (science, ethics, policy, technology)
5. Elevate discussions beyond surface-level analysis"""

# Argues counter-positions with evidence-based reasoning.
AGENT_B_PROMPT = """You are the Critical Responder. Your role:
1. Provide counterpoints with evidence-based reasoning
2. Identify logical fallacies and cognitive biases in arguments
3. Analyze implications at different scales (individual, societal, global)
4. Consider second and third-order consequences
5. Balance idealism with practical constraints"""

# Moderator persona that keeps the exchange rigorous.
OVERSEER_PROMPT = """You are the Depth Guardian. Your role:
1. Ensure discussions maintain intellectual rigor
2. Intervene when conversations become superficial or repetitive
3. Highlight unexamined assumptions and blind spots
4. Introduce relevant frameworks (systems thinking, ethical paradigms)
5. Prompt consideration of marginalized perspectives
6. Synthesize key tensions and paradoxes"""

# Injects contrarian, cross-domain viewpoints.
OUTSIDER_PROMPT = """You are the Cross-Disciplinary Provocateur. Your role:
1. Introduce radical perspectives from unrelated fields
2. Challenge conventional wisdom with contrarian viewpoints
3. Surface historical precedents and analogies
4. Propose unconventional solutions to complex problems
5. Highlight overlooked connections and systemic relationships
6. Question the framing of the discussion itself"""

# Brings global/cultural framing to the topic.
CULTURAL_LENS_PROMPT = """You are the Cultural Perspective. Your role:
1. Provide viewpoints from diverse global cultures (Eastern, Western, Indigenous, African, etc.)
2. Highlight how cultural values shape perspectives on the topic
3. Identify cultural biases in arguments and assumptions
4. Share traditions and practices relevant to the discussion
5. Suggest culturally inclusive approaches to solutions
6. Bridge cultural divides through nuanced understanding
7. Consider post-colonial and decolonial perspectives"""

# Periodically reviews the debate and issues balanced rulings.
JUDGE_PROMPT = """You are the Impartial Judge. Your role:
1. Periodically review the discussion and provide balanced rulings
2. Identify areas of agreement and unresolved tensions
3. Evaluate the strength of arguments from different perspectives
4. Highlight the most compelling insights and critical flaws
5. Suggest pathways toward resolution or further inquiry
6. Deliver rulings with clear justification and constructive guidance
7. Maintain objectivity while acknowledging valid points from all sides
8. Consider ethical implications and practical feasibility"""
|
100 |
+
|
101 |
+
# === GLOBAL STATE ===
# NOTE(review): module-level mutable state shared by all handlers. Gradio can
# serve multiple browser sessions against one process, so this is only safe
# for single-user deployments — confirm before scaling out.
conversation = []       # ordered transcript of the current discussion
turn_count = 0          # number of turns taken so far
auto_mode = False       # when True, the discussion advances without clicks
current_topic = ""      # active topic; also stored alongside each memory entry
last_ruling_turn = 0    # turn index at which the Judge last issued a ruling
# Per-agent generation knobs (0.0–1.0), adjustable from the UI sliders.
agent_params = {
    "Initiator": {"creativity": 0.7, "criticality": 0.5},
    "Responder": {"creativity": 0.5, "criticality": 0.8},
    "Guardian": {"creativity": 0.6, "criticality": 0.9},
    "Provocateur": {"creativity": 0.9, "criticality": 0.7},
    "Cultural": {"creativity": 0.7, "criticality": 0.6},
    "Judge": {"creativity": 0.4, "criticality": 0.9}
}
|
115 |
+
|
116 |
+
# === FREE CHAT COMPLETION API ===
|
117 |
+
def safe_chat_completion(system, messages, temperature=0.7, max_retries=3):
    """Query the free Hugging Face Inference API for a chat completion.

    Parameters
    ----------
    system : str
        System prompt prepended to the message list.
    messages : list[dict]
        Chat history as ``{"role": ..., "content": ...}`` dicts.
    temperature : float
        Sampling temperature forwarded to the model.
    max_retries : int
        Number of additional attempts while the remote model is still
        loading (HTTP 503) before giving up. New keyword with a default,
        so existing callers are unaffected.

    Returns
    -------
    str
        The generated text, or a human-readable "⚠️ ..." error string
        (errors are returned, not raised, so the UI can display them).
    """
    try:
        # Prepend the system prompt to the chat history.
        formatted = [{"role": "system", "content": system}]
        formatted.extend(messages)

        # NOTE(review): the raw Inference API usually expects "inputs" to be a
        # prompt string for text-generation models; sending a message list may
        # depend on the model's chat template support — verify against the API.
        payload = {
            "inputs": formatted,
            "parameters": {
                "max_new_tokens": 300,
                "temperature": temperature,
                "return_full_text": False
            }
        }

        # Token is optional; anonymous calls work with tighter rate limits.
        headers = {"Authorization": f"Bearer {HF_API_TOKEN}"} if HF_API_TOKEN else {}
        url = f"https://api-inference.huggingface.co/models/{CHAT_MODEL}"

        # Bounded retry loop. The previous version recursed on every 503 with
        # no limit, so a model stuck in the "loading" state could retry forever
        # and eventually overflow the stack.
        for attempt in range(max_retries + 1):
            response = requests.post(url, json=payload, headers=headers, timeout=60)

            if response.status_code == 200:
                return response.json()[0]['generated_text'].strip()
            if response.status_code == 503 and attempt < max_retries:
                time.sleep(15)  # give the model time to finish loading
                continue
            return f"⚠️ API Error: {response.text}"

    except Exception as e:
        # Network failures, timeouts, malformed JSON — surface as text.
        return f"⚠️ System Error: {str(e)}"
|
153 |
+
|
154 |
+
# === MEMORY MANAGEMENT ===
|
155 |
+
def embed_and_store(text, agent=None):
    """Embed *text* locally and append it to the FAISS index and memory log.

    Best-effort: any failure is printed and swallowed so a memory hiccup
    never interrupts the discussion loop.
    """
    try:
        vector = get_embedding(text)
        memory_index.add(np.array([vector], dtype='float32'))
        entry = {
            "text": text,
            "timestamp": datetime.now().isoformat(),
            "agent": agent if agent else "system",
            "topic": current_topic,
        }
        memory_data.append(entry)
        # Persist every fifth entry to limit disk churn.
        if not len(memory_data) % 5:
            with open(MEMORY_FILE, "wb") as f:
                pickle.dump(memory_data, f)
            faiss.write_index(memory_index, INDEX_FILE)
    except Exception as e:
        print(f"Memory Error: {str(e)}")
|
171 |
+
|
172 |
+
def retrieve_relevant_memory(query, k=3):
    """Return up to *k* past discussion snippets most similar to *query*.

    Each hit is a dict with a truncated ``text`` preview, its ``topic`` and
    ``agent``, and a ``similarity`` score derived from the FAISS L2 distance.
    Failures are printed and an empty list is returned.
    """
    try:
        q_vec = get_embedding(query)
        distances, indices = memory_index.search(
            np.array([q_vec], dtype='float32'), k
        )

        hits = []
        for rank, idx in enumerate(indices[0]):
            # FAISS pads missing results with -1; also guard the upper bound.
            if 0 <= idx < len(memory_data):
                record = memory_data[idx]
                hits.append({
                    "text": record['text'][:200] + "...",
                    "topic": record.get('topic', 'Unknown'),
                    "agent": record.get('agent', 'Unknown'),
                    "similarity": 1 - distances[0][rank],  # distance -> similarity
                })
        return hits
    except Exception as e:
        print(f"Memory retrieval error: {str(e)}")
        return []
|
191 |
+
|
192 |
+
# ... [Rest of the functions remain the same as previous implementation] ...
|
193 |
+
# Keep all the functions from the previous implementation except:
|
194 |
+
# - safe_chat_completion (already replaced above)
|
195 |
+
# - get_embedding (already replaced above)
|
196 |
+
|
197 |
+
# === GRADIO UI ===
# Declarative Gradio Blocks layout: status row, five tabs, custom CSS, and a
# usage notice. NOTE(review): the placeholder comments below say the event
# handlers were "kept from the previous implementation", but none are present
# in this file — the buttons defined here are not wired to any callbacks, so
# the UI will render but do nothing until the handlers are restored.
# NOTE(review): several emoji in labels below appear mojibake-encoded
# (e.g. "π§ "), likely from a copy/paste encoding mix-up — verify and restore
# the intended UTF-8 emoji.
with gr.Blocks(theme=gr.themes.Soft(), title="DeepSeek Discussion Platform") as demo:
    gr.Markdown("# π§ Hexa-Agent Discussion System (Free Version)")
    gr.Markdown("### Powered by Open-Source Models")

    # Status panel: turn counter, current topic, and which agents are active.
    with gr.Row():
        turn_counter = gr.Number(label="Turn Count", value=0, interactive=False)
        topic_display = gr.Textbox(label="Current Topic", interactive=False, lines=2)
        agent_status = gr.Textbox(label="Active Agents", value="π‘ Initiator, π Responder", interactive=False)

    # Tabbed interface
    # Tab 1: the live multi-agent conversation and its controls.
    with gr.Tab("Live Discussion"):
        convo_display = gr.HTML(
            value="<div class='convo-container'>Discussion will appear here</div>",
            elem_id="convo-display"  # styled by the .convo-container CSS below
        )

        with gr.Row():
            step_btn = gr.Button("βΆοΈ Next Turn", variant="primary")
            auto_btn = gr.Button("π΄ Auto: OFF", variant="secondary")
            clear_btn = gr.Button("π New Discussion", variant="stop")
            topic_btn = gr.Button("π² Random Topic", variant="secondary")
            ruling_btn = gr.Button("βοΈ Request Ruling", variant="primary")

        # User steering: custom topic plus direct questions to two agents.
        with gr.Accordion("π¬ Guide the Discussion", open=False):
            topic_input = gr.Textbox(label="Set Custom Topic", placeholder="e.g., Ethics of AGI in cultural contexts...")
            with gr.Row():
                qbox = gr.Textbox(label="Ask the Depth Guardian", placeholder="What perspectives are missing?")
                ruling_qbox = gr.Textbox(label="Specific Question for Judge", placeholder="What should be our guiding principle?")
            with gr.Row():
                overseer_out = gr.Textbox(label="Depth Guardian Response", interactive=False)
                judge_out = gr.Textbox(label="Judge's Response", interactive=False)

    # Tab 2: read-only panes showing each supporting agent's latest output.
    with gr.Tab("Agent Perspectives"):
        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### βοΈ Depth Guardian")
                intervention_display = gr.Textbox(label="", interactive=False)
            with gr.Column(scale=1):
                gr.Markdown("### π Cross-Disciplinary")
                outsider_display = gr.Textbox(label="", interactive=False)
            with gr.Column(scale=1):
                gr.Markdown("### π Cultural Lens")
                cultural_display = gr.Textbox(label="", interactive=False)

        with gr.Row():
            with gr.Column(scale=3):
                gr.Markdown("### βοΈ Final Judgment")
                judge_display = gr.Textbox(label="", interactive=False, lines=4)

    # Tab 3: derived analytics (sentiment, topics, participation, graph).
    with gr.Tab("Analysis Dashboard"):
        gr.Markdown("### Conversation Insights")
        with gr.Row():
            sentiment_display = gr.Textbox(label="Discussion Sentiment", interactive=False)
            topics_display = gr.JSON(label="Key Topics")

        with gr.Row():
            agent_plot = gr.Plot(label="Agent Participation")
            analysis_btn = gr.Button("Generate Insights", variant="primary")

        with gr.Row():
            gr.Markdown("### Knowledge Graph")
            graph_btn = gr.Button("Generate Knowledge Graph", variant="secondary")
            graph_display = gr.Image(label="Concept Relationships", interactive=False)

    # Tab 4: human participation — contributions, voting, flagging.
    with gr.Tab("Collaboration"):
        gr.Markdown("### Real-Time Collaboration")
        with gr.Row():
            user_input = gr.Textbox(label="Your Contribution", placeholder="Add your perspective...")
            submit_btn = gr.Button("Add to Discussion")

        with gr.Row():
            voting_btn = gr.Button("π Vote for Current Direction")
            flag_btn = gr.Button("π© Flag Issue")

        with gr.Row():
            user_feedback = gr.Textbox(label="Community Feedback", interactive=False)

    # Tab 5: export formats and an outbound webhook integration.
    with gr.Tab("Export & Integrate"):
        with gr.Row():
            format_radio = gr.Radio(
                ["PDF Report", "JSON Data", "Text Transcript"],
                label="Export Format",
                value="PDF Report"
            )
            export_btn = gr.Button("Export Discussion", variant="primary")
            export_result = gr.File(label="Download File")

        with gr.Row():
            gr.Markdown("### API Integration")
            webhook_url = gr.Textbox(label="Webhook URL", placeholder="https://your-platform.com/webhook")
            integrate_btn = gr.Button("Connect to External Platform", variant="secondary")
            integration_status = gr.Textbox(label="Status", interactive=False)

    # Tab 6: per-agent creativity/criticality sliders, keyed into agent_sliders.
    with gr.Tab("Agent Configuration"):
        gr.Markdown("### Customize Agent Behavior")
        with gr.Row():
            agent_sliders = {}
            for agent in ["Initiator", "Responder", "Guardian", "Provocateur", "Cultural", "Judge"]:
                with gr.Column():
                    gr.Markdown(f"#### {agent}")
                    agent_sliders[f"{agent}_creativity"] = gr.Slider(
                        0.0, 1.0, value=0.7,
                        label="Creativity", interactive=True
                    )
                    agent_sliders[f"{agent}_critical"] = gr.Slider(
                        0.0, 1.0, value=0.5,
                        label="Criticality", interactive=True
                    )

    # Custom CSS
    demo.css = """
    .convo-container {
    max-height: 400px;
    overflow-y: auto;
    padding: 15px;
    border: 1px solid #e0e0e0;
    border-radius: 8px;
    background-color: #f9f9f9;
    line-height: 1.6;
    }
    .convo-container p {
    margin-bottom: 10px;
    }
    #topic-display {
    font-weight: bold;
    font-size: 1.1em;
    }
    .free-model-notice {
    background-color: #e6f7ff;
    padding: 10px;
    border-radius: 5px;
    margin-bottom: 15px;
    border-left: 4px solid #1890ff;
    }
    """

    # Free model notice
    gr.Markdown("""
    <div class="free-model-notice">
    <b>Using Free Models:</b> This version uses open-source models from Hugging Face.
    Responses may be slower and less refined than commercial APIs.
    Consider using local GPU for better performance.
    </div>
    """)

    # ... [Keep all the event handlers from the previous implementation] ...
    # The event handlers remain unchanged from the last working version

# Start the Gradio server (blocking call).
demo.launch()
|