Leonydis137 committed
Commit cfdb7f5 · verified · 1 Parent(s): b5ad884

Update app.py

Files changed (1)
1. app.py +363 -0

app.py CHANGED
@@ -189,7 +189,370 @@ def retrieve_relevant_memory(query, k=3):
     return []
 
 # ... [Previous code remains unchanged] ...
+# ... [Imports and constants remain unchanged] ...
+
+# === FUNCTION DEFINITIONS ===
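+# Conversation state is a plain list of dicts shaped like
+# {"agent": "Initiator", "text": "...", "turn": 3}; the helpers below all read
+# and append entries of this form.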
+def overseer_respond(question, conversation, current_topic):
+    """Get response from Depth Guardian"""
+    context = f"Current Topic: {current_topic}\n\n" if current_topic else ""
+    context += "Conversation History:\n"
+    for msg in conversation[-5:]:
+        context += f"- {msg['agent']}: {msg['text']}\n"
+
+    response = safe_chat_completion(
+        system=OVERSEER_PROMPT,
+        messages=[{"role": "user", "content": f"{context}\nQuestion: {question}"}],
+        temperature=0.8
+    )
+    embed_and_store(response, "Guardian", current_topic)
+    return response
+
+def ask_judge(question, conversation, current_topic):
+    """Get ruling from Judge"""
+    context = f"Topic: {current_topic}\n\n" if current_topic else ""
+    context += "Recent Discussion:\n"
+    for msg in conversation[-5:]:
+        context += f"- {msg['agent']}: {msg['text']}\n"
+
+    response = safe_chat_completion(
+        system=JUDGE_PROMPT,
+        messages=[{"role": "user", "content": f"{context}\nSpecific Question: {question}"}],
+        temperature=0.6
+    )
+    embed_and_store(response, "Judge", current_topic)
+    return response
+
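+# Turn scheduling used by step(): turns rotate through the five discussion
+# agents in order (turn 1 -> Initiator, 2 -> Responder, 3 -> Guardian,
+# 4 -> Provocateur, 5 -> Cultural, then the cycle repeats). Once judge_interval
+# turns have passed since the last ruling, the Judge takes the next slot instead.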
+def step(topic_input, conversation, turn_count, current_topic, last_ruling_turn, agent_params):
+    """Advance the discussion by one turn"""
+    # Set topic on first turn
+    if turn_count == 0:
+        if topic_input.strip():
+            current_topic = topic_input.strip()
+        else:
+            current_topic = "Ethical Implications of Advanced AI Systems"
+
+    # Determine which agent speaks
+    agent_sequence = ["Initiator", "Responder", "Guardian", "Provocateur", "Cultural"]
+    agent_index = turn_count % len(agent_sequence)
+    agent_name = agent_sequence[agent_index]
+
+    # Special handling for Judge
+    judge_interval = 5
+    if turn_count - last_ruling_turn >= judge_interval and turn_count > 0:
+        agent_name = "Judge"
+
+    # Get system prompt and temperature
+    prompts = {
+        "Initiator": AGENT_A_PROMPT,
+        "Responder": AGENT_B_PROMPT,
+        "Guardian": OVERSEER_PROMPT,
+        "Provocateur": OUTSIDER_PROMPT,
+        "Cultural": CULTURAL_LENS_PROMPT,
+        "Judge": JUDGE_PROMPT
+    }
+    temperature = agent_params[agent_name]["creativity"]
+
+    # Prepare context
+    context = f"Current Topic: {current_topic}\n\nDiscussion History:\n"
+    for msg in conversation[-5:]:
+        context += f"{msg['agent']}: {msg['text']}\n\n"
+
+    # Generate response
+    response = safe_chat_completion(
+        system=prompts[agent_name],
+        messages=[{"role": "user", "content": context}],
+        temperature=temperature
+    )
+
+    # Create message entry
+    new_entry = {
+        "agent": agent_name,
+        "text": response,
+        "turn": turn_count + 1
+    }
+
+    # Update state
+    updated_conversation = conversation + [new_entry]
+    new_turn_count = turn_count + 1
+    new_last_ruling_turn = new_turn_count if agent_name == "Judge" else last_ruling_turn
+
+    # Update memory
+    embed_and_store(response, agent_name, current_topic)
+
+    # Format HTML output
+    html_output = format_conversation_html(updated_conversation)
+
+    # Get agent-specific displays
+    intervention = get_last_by_agent(updated_conversation, "Guardian")
+    outsider = get_last_by_agent(updated_conversation, "Provocateur")
+    cultural = get_last_by_agent(updated_conversation, "Cultural")
+    judge = get_last_by_agent(updated_conversation, "Judge")
+
+    # Prepare agent status: show each agent's latest message snippet
+    active_agents = " | ".join(
+        f"{agent}: {get_last_by_agent(updated_conversation, agent)[:30]}..."
+        for agent in ["Initiator", "Responder", "Guardian", "Provocateur", "Cultural", "Judge"]
+    )
+
+    return (
+        html_output,
+        intervention,
+        outsider,
+        cultural,
+        judge,
+        current_topic,
+        new_turn_count,
+        active_agents,
+        updated_conversation,
+        new_turn_count,
+        current_topic,
+        new_last_ruling_turn,
+        agent_params
+    )
+
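+# NOTE: the 13 values returned by step() above must line up positionally with
+# the outputs list presumably wired to the step/advance handler in the
+# (unchanged) Gradio UI block, since Gradio assigns returned values to output
+# components by order.
+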
+def get_last_by_agent(conversation, agent_name):
+    """Get last message from specific agent"""
+    for msg in reversed(conversation):
+        if msg["agent"] == agent_name:
+            return msg["text"]
+    return "No message yet"
+
+def format_conversation_html(conversation):
+    """Format conversation as HTML"""
+    color_map = {
+        "Initiator": "#e6f7ff",
+        "Responder": "#f6ffed",
+        "Guardian": "#fff7e6",
+        "Provocateur": "#f9e6ff",
+        "Cultural": "#e6ffed",
+        "Judge": "#f0f0f0",
+        "User": "#f0f0f0"
+    }
+    html = "<div class='convo-container'>"
+    for msg in conversation:
+        agent = msg["agent"]
+        color = color_map.get(agent, "#ffffff")
+        html += f"""
+        <div style='background:{color}; padding:10px; margin:10px; border-radius:5px;'>
+            <b>{agent}:</b> {msg['text']}
+        </div>
+        """
+    html += "</div>"
+    return html
+
+def toggle_auto(auto_mode):
+    """Toggle auto-advance mode"""
+    new_mode = not auto_mode
+    return ("🟢 Auto: ON" if new_mode else "🔴 Auto: OFF", new_mode)
+
+def clear_convo():
+    """Reset conversation"""
+    return (
+        format_conversation_html([]),
+        "",
+        "",
+        "",
+        "",
+        "",
+        0,
+        "💡 Initiator, 🔍 Responder",
+        [],
+        0,
+        "",
+        0,
+        "",
+        ""
+    )
+
+def new_topic(conversation, turn_count, current_topic, last_ruling_turn):
+    """Generate a new discussion topic"""
+    topics = [
+        "The Ethics of Genetic Engineering in Humans",
+        "Universal Basic Income in the Age of Automation",
+        "Cultural Impacts of Global AI Deployment",
+        "Privacy vs Security in Digital Societies",
+        "The Future of Human-AI Collaboration"
+    ]
+    # Avoid shadowing the function name with the local variable
+    chosen_topic = np.random.choice(topics)
+    return (
+        format_conversation_html([]),
+        chosen_topic,
+        0,
+        [],
+        0,
+        chosen_topic,
+        0
+    )
+
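+# request_ruling() below appends a Judge entry at the current turn and resets
+# last_ruling_turn, which also restarts the judge_interval countdown in step().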
+def request_ruling(conversation, current_topic, turn_count, last_ruling_turn):
+    """Request a ruling from the Judge"""
+    context = f"Topic: {current_topic}\n\nDiscussion Summary:\n"
+    for msg in conversation[-5:]:
+        context += f"- {msg['agent']}: {msg['text']}\n"
+
+    response = safe_chat_completion(
+        system=JUDGE_PROMPT,
+        messages=[{"role": "user", "content": f"{context}\nPlease provide a comprehensive ruling."}],
+        temperature=0.5
+    )
+
+    new_entry = {
+        "agent": "Judge",
+        "text": response,
+        "turn": turn_count
+    }
+    updated_conversation = conversation + [new_entry]
+    new_last_ruling_turn = turn_count
+    return response, updated_conversation, new_last_ruling_turn
+
+def run_analysis(conversation):
+    """Run basic analysis (simplified for free version)"""
+    # Sentiment analysis placeholder
+    sentiments = ["Positive", "Neutral", "Negative"]
+    sentiment_result = np.random.choice(sentiments, p=[0.4, 0.4, 0.2])
+
+    # Topic extraction placeholder
+    topics = ["AI Ethics", "Policy", "Cultural Impact", "Technology", "Future Scenarios"]
+    topic_result = ", ".join(np.random.choice(topics, 3, replace=False))
+
+    # Agent participation plot
+    agents = [msg["agent"] for msg in conversation]
+    if agents:
+        agent_counts = {agent: agents.count(agent) for agent in set(agents)}
+        plt.figure(figsize=(8, 4))
+        plt.bar(agent_counts.keys(), agent_counts.values())
+        plt.title("Agent Participation")
+        plt.ylabel("Number of Messages")
+        plt.tight_layout()
+        plt.savefig("agent_plot.png")
+        plt.close()  # release the figure so repeated runs don't accumulate open figures
+        plot_path = "agent_plot.png"
+    else:
+        plot_path = None
+
+    return (
+        f"Overall Sentiment: {sentiment_result}",
+        f"Key Topics: {topic_result}",
+        plot_path
+    )
+
+def generate_knowledge_graph(conversation):
+    """Generate a simple knowledge graph (placeholder)"""
+    G = nx.DiGraph()
+    entities = ["AI", "Ethics", "Society", "Technology", "Future"]
+    for i, e1 in enumerate(entities):
+        for j, e2 in enumerate(entities):
+            if i != j and np.random.random() > 0.7:
+                G.add_edge(e1, e2, weight=np.random.random())
+
+    plt.figure(figsize=(10, 8))
+    pos = nx.spring_layout(G)
+    nx.draw(G, pos, with_labels=True, node_size=2000,
+            node_color="skyblue", font_size=10,
+            edge_color="gray", width=1.5)
+    plt.title("Knowledge Graph")
+    plt.savefig("knowledge_graph.png")
+    plt.close()  # close the figure after saving
+    return "knowledge_graph.png"
+
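+# export_handler() below writes the chosen export ("txt", "pdf" via reportlab,
+# or "json") to the working directory and returns its path. The
+# "export_error.txt" fallback is returned as a name only; no such file is created.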
+def export_handler(format_radio, conversation, current_topic, turn_count):
+    """Export conversation in various formats"""
+    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+
+    if format_radio == "txt":
+        filename = f"discussion_{timestamp}.txt"
+        with open(filename, "w") as f:
+            f.write(f"Topic: {current_topic}\nTurns: {turn_count}\n\n")
+            for msg in conversation:
+                f.write(f"{msg['agent']} (Turn {msg.get('turn', 'N/A')}):\n{msg['text']}\n\n")
+        return filename
+
+    elif format_radio == "pdf":
+        filename = f"discussion_{timestamp}.pdf"
+        doc = SimpleDocTemplate(filename, pagesize=letter)
+        styles = getSampleStyleSheet()
+        story = []
+
+        story.append(Paragraph(f"Discussion: {current_topic}", styles["Title"]))
+        story.append(Paragraph(f"Turns: {turn_count}", styles["Normal"]))
+        story.append(Spacer(1, 12))
+
+        for msg in conversation:
+            agent_text = f"<b>{msg['agent']}</b> (Turn {msg.get('turn', 'N/A')}):"
+            story.append(Paragraph(agent_text, styles["Normal"]))
+            story.append(Paragraph(msg["text"], styles["BodyText"]))
+            story.append(Spacer(1, 12))
+
+        doc.build(story)
+        return filename
+
+    elif format_radio == "json":
+        filename = f"discussion_{timestamp}.json"
+        data = {
+            "topic": current_topic,
+            "turns": turn_count,
+            "conversation": conversation
+        }
+        with open(filename, "w") as f:
+            json.dump(data, f, indent=2)
+        return filename
+
+    return "export_error.txt"
+
+def send_to_webhook(webhook_url, conversation, current_topic, turn_count):
+    """Send conversation to webhook"""
+    if not webhook_url.startswith("http"):
+        return "⚠️ Invalid URL"
+
+    payload = {
+        "topic": current_topic,
+        "turns": turn_count,
+        "conversation": conversation
+    }
+
+    try:
+        response = requests.post(webhook_url, json=payload, timeout=10)
+        if response.status_code == 200:
+            return "✅ Sent successfully!"
+        return f"⚠️ Error: {response.status_code} - {response.text}"
+    except Exception as e:
+        return f"⚠️ Connection error: {str(e)}"
+
+def add_user_contribution(user_input, conversation):
+    """Add user contribution to conversation"""
+    if not user_input.strip():
+        return format_conversation_html(conversation), "Please enter a message", conversation
+
+    new_entry = {
+        "agent": "User",
+        "text": user_input,
+        "turn": len(conversation) + 1
+    }
+    updated_conversation = conversation + [new_entry]
+    embed_and_store(user_input, "User", current_topic="")
+    return format_conversation_html(updated_conversation), "✅ Added your contribution!", updated_conversation
+
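+# update_agent_params() below expects the slider values flattened agent-major:
+# Initiator creativity, Initiator criticality, Responder creativity, ...,
+# Judge criticality, followed by the current params dict as the final argument.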
+def update_agent_params(*args):
+    """Update agent parameters from sliders"""
+    # Last argument is the current params state
+    current_params = args[-1]
+    sliders = args[:-1]
+
+    # Map sliders to agent parameters
+    agents = ["Initiator", "Responder", "Guardian", "Provocateur", "Cultural", "Judge"]
+    params = ["creativity", "criticality"]
+
+    updated_params = {}
+    slider_index = 0
+    for agent in agents:
+        updated_params[agent] = {}
+        for param in params:
+            updated_params[agent][param] = sliders[slider_index]
+            slider_index += 1
+
+    return updated_params
+
+# === GRADIO UI SETUP ===
+# ... [The entire Gradio UI block remains unchanged from the previous implementation] ...
+# This includes the UI layout, components, and event handlers
 # === GRADIO UI ===
 with gr.Blocks(theme=gr.themes.Soft(), title="DeepSeek Discussion Platform") as demo:
     gr.Markdown("# 🧠 Hexa-Agent Discussion System (Free Version)")