Leonydis137 committed
Commit a1017cc · verified · 1 Parent(s): 5a8f167

Update app.py

Files changed (1)
  1. app.py +365 -2
app.py CHANGED
@@ -112,7 +112,6 @@ agent_params = {
     "Cultural": {"creativity": 0.7, "criticality": 0.6},
     "Judge": {"creativity": 0.4, "criticality": 0.9}
 }
-
 # === FREE CHAT COMPLETION API ===
 def safe_chat_completion(system, messages, temperature=0.7):
     """Use free Hugging Face Inference API"""
@@ -238,7 +237,371 @@ with gr.Blocks(theme=gr.themes.Soft(), title="DeepSeek Discussion Platform") as
     overseer_out = gr.Textbox(label="Depth Guardian Response", interactive=False)
     judge_out = gr.Textbox(label="Judge's Response", interactive=False)
 
-    # ... [Keep all other tabs the same as before] ...
+    # === COMPLETE IMPLEMENTATION ===
+
+    def overseer_respond(question, conversation, current_topic):
+        """Get response from Depth Guardian"""
+        context = f"Current Topic: {current_topic}\n\n" if current_topic else ""
+        context += "Conversation History:\n"
+        for msg in conversation[-5:]:
+            context += f"- {msg['agent']}: {msg['text']}\n"
+
+        response = safe_chat_completion(
+            system=OVERSEER_PROMPT,
+            messages=[{"role": "user", "content": f"{context}\nQuestion: {question}"}],
+            temperature=0.8
+        )
+        embed_and_store(response, "Guardian")
+        return response
+
+    def ask_judge(question, conversation, current_topic):
+        """Get ruling from Judge"""
+        context = f"Topic: {current_topic}\n\n" if current_topic else ""
+        context += "Recent Discussion:\n"
+        for msg in conversation[-5:]:
+            context += f"- {msg['agent']}: {msg['text']}\n"
+
+        response = safe_chat_completion(
+            system=JUDGE_PROMPT,
+            messages=[{"role": "user", "content": f"{context}\nSpecific Question: {question}"}],
+            temperature=0.6
+        )
+        embed_and_store(response, "Judge")
+        return response
+
+    def step(topic_input, conversation, turn_count, current_topic, last_ruling_turn, agent_params):
+        """Advance the discussion by one turn"""
+        # Conversation state is passed in and returned explicitly (no globals)
+
+        # Set topic on first turn
+        if turn_count == 0:
+            if topic_input.strip():
+                current_topic = topic_input.strip()
+            else:
+                current_topic = "Ethical Implications of Advanced AI Systems"
+
+        # Determine which agent speaks
+        agent_sequence = ["Initiator", "Responder", "Guardian", "Provocateur", "Cultural"]
+        agent_index = turn_count % len(agent_sequence)
+        agent_name = agent_sequence[agent_index]
+
+        # Special handling for Judge
+        judge_interval = 5
+        if turn_count - last_ruling_turn >= judge_interval and turn_count > 0:
+            agent_name = "Judge"
+
+        # Get system prompt and temperature
+        prompts = {
+            "Initiator": AGENT_A_PROMPT,
+            "Responder": AGENT_B_PROMPT,
+            "Guardian": OVERSEER_PROMPT,
+            "Provocateur": OUTSIDER_PROMPT,
+            "Cultural": CULTURAL_LENS_PROMPT,
+            "Judge": JUDGE_PROMPT
+        }
+        temperature = agent_params[agent_name]["creativity"]
+
+        # Prepare context
+        context = f"Current Topic: {current_topic}\n\nDiscussion History:\n"
+        for msg in conversation[-5:]:
+            context += f"{msg['agent']}: {msg['text']}\n\n"
+
+        # Generate response
+        response = safe_chat_completion(
+            system=prompts[agent_name],
+            messages=[{"role": "user", "content": context}],
+            temperature=temperature
+        )
+
+        # Create message entry
+        new_entry = {
+            "agent": agent_name,
+            "text": response,
+            "turn": turn_count + 1
+        }
+
+        # Update state
+        updated_conversation = conversation + [new_entry]
+        new_turn_count = turn_count + 1
+        new_last_ruling_turn = new_turn_count if agent_name == "Judge" else last_ruling_turn
+
+        # Update memory
+        embed_and_store(response, agent_name)
+
+        # Format HTML output
+        html_output = format_conversation_html(updated_conversation)
+
+        # Get agent-specific displays
+        intervention = get_last_by_agent(updated_conversation, "Guardian")
+        outsider = get_last_by_agent(updated_conversation, "Provocateur")
+        cultural = get_last_by_agent(updated_conversation, "Cultural")
+        judge = get_last_by_agent(updated_conversation, "Judge")
+
+        # Prepare agent status
+        active_agents = " | ".join([f"{agent}: {entry['text'][:30]}..." for agent, entry in zip(
+            ["Initiator", "Responder", "Guardian", "Provocateur", "Cultural", "Judge"],
+            [new_entry] * 6  # Simplified for demo
+        )])
+
+        return (
+            html_output,
+            intervention,
+            outsider,
+            cultural,
+            judge,
+            current_topic,
+            new_turn_count,
+            active_agents,
+            updated_conversation,
+            new_turn_count,
+            current_topic,
+            new_last_ruling_turn,
+            agent_params
+        )
+
+    def get_last_by_agent(conversation, agent_name):
+        """Get last message from specific agent"""
+        for msg in reversed(conversation):
+            if msg["agent"] == agent_name:
+                return msg["text"]
+        return "No message yet"
+
+    def format_conversation_html(conversation):
+        """Format conversation as HTML"""
+        html = "<div class='convo-container'>"
+        for msg in conversation:
+            agent = msg["agent"]
+            color_map = {
+                "Initiator": "#e6f7ff",
+                "Responder": "#f6ffed",
+                "Guardian": "#fff7e6",
+                "Provocateur": "#f9e6ff",
+                "Cultural": "#e6ffed",
+                "Judge": "#f0f0f0",
+                "User": "#f0f0f0"
+            }
+            color = color_map.get(agent, "#ffffff")
+            html += f"""
+            <div style='background:{color}; padding:10px; margin:10px; border-radius:5px;'>
+                <b>{agent}:</b> {msg['text']}
+            </div>
+            """
+        html += "</div>"
+        return html
+
+    def toggle_auto(auto_mode):
+        """Toggle auto-advance mode"""
+        new_mode = not auto_mode
+        return ("🟢 Auto: ON" if new_mode else "🔴 Auto: OFF", new_mode)
+
+    def clear_convo():
+        """Reset conversation"""
+        global conversation, turn_count, current_topic, last_ruling_turn
+        conversation = []
+        turn_count = 0
+        current_topic = ""
+        last_ruling_turn = 0
+        return (
+            format_conversation_html([]),
+            "",
+            "",
+            "",
+            "",
+            "",
+            0,
+            "💡 Initiator, 🔍 Responder",
+            [],
+            0,
+            "",
+            0,
+            "",
+            ""
+        )
+
+    def new_topic(conversation, turn_count, current_topic):
+        """Generate a new discussion topic"""
+        # In a real implementation, this would call an LLM to generate a topic
+        topics = [
+            "The Ethics of Genetic Engineering in Humans",
+            "Universal Basic Income in the Age of Automation",
+            "Cultural Impacts of Global AI Deployment",
+            "Privacy vs Security in Digital Societies",
+            "The Future of Human-AI Collaboration"
+        ]
+        new_topic = np.random.choice(topics)
+        return (
+            format_conversation_html([]),
+            new_topic,
+            0,
+            [],
+            0,
+            new_topic
+        )
+
+    def request_ruling(conversation, current_topic, turn_count, last_ruling_turn):
+        """Request a ruling from the Judge"""
+        context = f"Topic: {current_topic}\n\nDiscussion Summary:\n"
+        for msg in conversation[-5:]:
+            context += f"- {msg['agent']}: {msg['text']}\n"
+
+        response = safe_chat_completion(
+            system=JUDGE_PROMPT,
+            messages=[{"role": "user", "content": f"{context}\nPlease provide a comprehensive ruling."}],
+            temperature=0.5
+        )
+
+        new_entry = {
+            "agent": "Judge",
+            "text": response,
+            "turn": turn_count
+        }
+        updated_conversation = conversation + [new_entry]
+        return response, updated_conversation, turn_count
+
+    def run_analysis(conversation):
+        """Run basic analysis (simplified for free version)"""
+        # Sentiment analysis placeholder
+        sentiments = ["Positive", "Neutral", "Negative"]
+        sentiment_result = np.random.choice(sentiments, p=[0.4, 0.4, 0.2])
+
+        # Topic extraction placeholder
+        topics = ["AI Ethics", "Policy", "Cultural Impact", "Technology", "Future Scenarios"]
+        topic_result = ", ".join(np.random.choice(topics, 3, replace=False))
+
+        # Agent participation plot
+        agents = [msg["agent"] for msg in conversation]
+        if agents:
+            agent_counts = {agent: agents.count(agent) for agent in set(agents)}
+            plt.figure(figsize=(8, 4))
+            plt.bar(agent_counts.keys(), agent_counts.values())
+            plt.title("Agent Participation")
+            plt.ylabel("Number of Messages")
+            plt.tight_layout()
+            plt.savefig("agent_plot.png")
+            plot_path = "agent_plot.png"
+        else:
+            plot_path = None
+
+        return (
+            f"Overall Sentiment: {sentiment_result}",
+            f"Key Topics: {topic_result}",
+            plot_path
+        )
+
+    def generate_knowledge_graph(conversation):
+        """Generate a simple knowledge graph (placeholder)"""
+        G = nx.DiGraph()
+        entities = ["AI", "Ethics", "Society", "Technology", "Future"]
+        for i, e1 in enumerate(entities):
+            for j, e2 in enumerate(entities):
+                if i != j and np.random.random() > 0.7:
+                    G.add_edge(e1, e2, weight=np.random.random())
+
+        plt.figure(figsize=(10, 8))
+        pos = nx.spring_layout(G)
+        nx.draw(G, pos, with_labels=True, node_size=2000,
+                node_color="skyblue", font_size=10,
+                edge_color="gray", width=1.5)
+        plt.title("Knowledge Graph")
+        plt.savefig("knowledge_graph.png")
+        return "knowledge_graph.png"
+
+    def export_handler(format_radio, conversation, current_topic, turn_count):
+        """Export conversation in various formats"""
+        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+
+        if format_radio == "txt":
+            filename = f"discussion_{timestamp}.txt"
+            with open(filename, "w") as f:
+                f.write(f"Topic: {current_topic}\nTurns: {turn_count}\n\n")
+                for msg in conversation:
+                    f.write(f"{msg['agent']} (Turn {msg.get('turn', 'N/A')}):\n{msg['text']}\n\n")
+            return filename
+
+        elif format_radio == "pdf":
+            filename = f"discussion_{timestamp}.pdf"
+            doc = SimpleDocTemplate(filename, pagesize=letter)
+            styles = getSampleStyleSheet()
+            story = []
+
+            story.append(Paragraph(f"Discussion: {current_topic}", styles["Title"]))
+            story.append(Paragraph(f"Turns: {turn_count}", styles["Normal"]))
+            story.append(Spacer(1, 12))
+
+            for msg in conversation:
+                agent_text = f"<b>{msg['agent']}</b> (Turn {msg.get('turn', 'N/A')}):"
+                story.append(Paragraph(agent_text, styles["Normal"]))
+                story.append(Paragraph(msg["text"], styles["BodyText"]))
+                story.append(Spacer(1, 12))
+
+            doc.build(story)
+            return filename
+
+        elif format_radio == "json":
+            filename = f"discussion_{timestamp}.json"
+            data = {
+                "topic": current_topic,
+                "turns": turn_count,
+                "conversation": conversation
+            }
+            with open(filename, "w") as f:
+                json.dump(data, f, indent=2)
+            return filename
+
+        return "export_error.txt"
+
+    def send_to_webhook(webhook_url, conversation, current_topic, turn_count):
+        """Send conversation to webhook"""
+        if not webhook_url.startswith("http"):
+            return "⚠️ Invalid URL"
+
+        payload = {
+            "topic": current_topic,
+            "turns": turn_count,
+            "conversation": conversation
+        }
+
+        try:
+            response = requests.post(webhook_url, json=payload, timeout=10)
+            if response.status_code == 200:
+                return "✅ Sent successfully!"
+            return f"⚠️ Error: {response.status_code} - {response.text}"
+        except Exception as e:
+            return f"⚠️ Connection error: {str(e)}"
+
+    def add_user_contribution(user_input, conversation):
+        """Add user contribution to conversation"""
+        if not user_input.strip():
+            return format_conversation_html(conversation), "Please enter a message", conversation
+
+        new_entry = {
+            "agent": "User",
+            "text": user_input,
+            "turn": len(conversation) + 1
+        }
+        updated_conversation = conversation + [new_entry]
+        embed_and_store(user_input, "User")
+        return format_conversation_html(updated_conversation), "✅ Added your contribution!", updated_conversation
+
+    def update_agent_params(*args):
+        """Update agent parameters from sliders"""
+        # Last argument is the current params state
+        current_params = args[-1]
+        sliders = args[:-1]
+
+        # Map sliders to agent parameters
+        agents = ["Initiator", "Responder", "Guardian", "Provocateur", "Cultural", "Judge"]
+        params = ["creativity", "criticality"]
+
+        updated_params = {}
+        slider_index = 0
+        for agent in agents:
+            updated_params[agent] = {}
+            for param in params:
+                updated_params[agent][param] = sliders[slider_index]
+                slider_index += 1
+
+        return updated_params
 
     # Custom CSS
     demo.css = """