Leonydis137 committed (verified)
Commit b5ad884 · 1 Parent(s): e02229b

Update app.py

Files changed (1)
  1. app.py +97 -410
app.py CHANGED
@@ -188,12 +188,7 @@ def retrieve_relevant_memory(query, k=3):
188
  print(f"Memory retrieval error: {str(e)}")
189
  return []
190
 
191
- # ... [Rest of the functions remain the same as previous implementation] ...
192
- # Keep all the functions from the previous implementation except:
193
- # - safe_chat_completion (already replaced above)
194
- # - get_embedding (already replaced above)
195
-
196
- # ... [Keep all imports, config, and function definitions above] ...
197
 
198
  # === GRADIO UI ===
199
  with gr.Blocks(theme=gr.themes.Soft(), title="DeepSeek Discussion Platform") as demo:
@@ -221,6 +216,12 @@ with gr.Blocks(theme=gr.themes.Soft(), title="DeepSeek Discussion Platform") as
221
  elem_id="convo-display"
222
  )
223
 
224
  with gr.Row():
225
  step_btn = gr.Button("▶️ Next Turn", variant="primary")
226
  auto_btn = gr.Button("🔴 Auto: OFF", variant="secondary")
@@ -236,390 +237,54 @@ with gr.Blocks(theme=gr.themes.Soft(), title="DeepSeek Discussion Platform") as
236
  with gr.Row():
237
  overseer_out = gr.Textbox(label="Depth Guardian Response", interactive=False)
238
  judge_out = gr.Textbox(label="Judge's Response", interactive=False)
239
 
240
- # === COMPLETE IMPLEMENTATION ===
241
-
242
- def overseer_respond(question, conversation, current_topic):
243
- """Get response from Depth Guardian"""
244
- context = f"Current Topic: {current_topic}\n\n" if current_topic else ""
245
- context += "Conversation History:\n"
246
- for msg in conversation[-5:]:
247
- context += f"- {msg['agent']}: {msg['text']}\n"
248
-
249
- response = safe_chat_completion(
250
- system=OVERSEER_PROMPT,
251
- messages=[{"role": "user", "content": f"{context}\nQuestion: {question}"}],
252
- temperature=0.8
253
- )
254
- embed_and_store(response, "Guardian")
255
- return response
256
-
257
- def ask_judge(question, conversation, current_topic):
258
- """Get ruling from Judge"""
259
- context = f"Topic: {current_topic}\n\n" if current_topic else ""
260
- context += "Recent Discussion:\n"
261
- for msg in conversation[-5:]:
262
- context += f"- {msg['agent']}: {msg['text']}\n"
263
-
264
- response = safe_chat_completion(
265
- system=JUDGE_PROMPT,
266
- messages=[{"role": "user", "content": f"{context}\nSpecific Question: {question}"}],
267
- temperature=0.6
268
- )
269
-
270
- def step(topic_input, conversation, turn_count, current_topic, last_ruling_turn, agent_params):
271
- """Advance the discussion by one turn"""
272
- # Remove global declarations - we'll use the parameters directly
273
- # Set topic on first turn
274
- if turn_count == 0:
275
- if topic_input.strip():
276
- current_topic = topic_input.strip()
277
- else:
278
- current_topic = "Ethical Implications of Advanced AI Systems"
279
-
280
- # Determine which agent speaks
281
- agent_sequence = ["Initiator", "Responder", "Guardian", "Provocateur", "Cultural"]
282
- agent_index = turn_count % len(agent_sequence)
283
- agent_name = agent_sequence[agent_index]
284
-
285
- # Special handling for Judge
286
- judge_interval = 5
287
- if turn_count - last_ruling_turn >= judge_interval and turn_count > 0:
288
- agent_name = "Judge"
289
-
290
- # Get system prompt and temperature
291
- prompts = {
292
- "Initiator": AGENT_A_PROMPT,
293
- "Responder": AGENT_B_PROMPT,
294
- "Guardian": OVERSEER_PROMPT,
295
- "Provocateur": OUTSIDER_PROMPT,
296
- "Cultural": CULTURAL_LENS_PROMPT,
297
- "Judge": JUDGE_PROMPT
298
- }
299
- temperature = agent_params[agent_name]["creativity"]
300
-
301
- # Prepare context
302
- context = f"Current Topic: {current_topic}\n\nDiscussion History:\n"
303
- for msg in conversation[-5:]:
304
- context += f"{msg['agent']}: {msg['text']}\n\n"
305
-
306
- # Generate response
307
- response = safe_chat_completion(
308
- system=prompts[agent_name],
309
- messages=[{"role": "user", "content": context}],
310
- temperature=temperature
311
- )
312
-
313
- # Create message entry
314
- new_entry = {
315
- "agent": agent_name,
316
- "text": response,
317
- "turn": turn_count + 1
318
- }
319
-
320
- # Update state
321
- updated_conversation = conversation + [new_entry]
322
- new_turn_count = turn_count + 1
323
- new_last_ruling_turn = new_turn_count if agent_name == "Judge" else last_ruling_turn
324
-
325
- # Update memory
326
- embed_and_store(response, agent_name, current_topic) # Pass current_topic here
327
-
328
- # Format HTML output
329
- html_output = format_conversation_html(updated_conversation)
330
-
331
- # Get agent-specific displays
332
- intervention = get_last_by_agent(updated_conversation, "Guardian")
333
- outsider = get_last_by_agent(updated_conversation, "Provocateur")
334
- cultural = get_last_by_agent(updated_conversation, "Cultural")
335
- judge = get_last_by_agent(updated_conversation, "Judge")
336
-
337
- # Prepare agent status
338
- active_agents = " | ".join([f"{agent}: {entry['text'][:30]}..." for agent, entry in zip(
339
- ["Initiator", "Responder", "Guardian", "Provocateur", "Cultural", "Judge"],
340
- [new_entry] * 6 # Simplified for demo
341
- )])
342
-
343
- return (
344
- html_output,
345
- intervention,
346
- outsider,
347
- cultural,
348
- judge,
349
- current_topic,
350
- new_turn_count,
351
- active_agents,
352
- updated_conversation,
353
- new_turn_count,
354
- current_topic,
355
- new_last_ruling_turn,
356
- agent_params
357
- )
358
-
359
- # Update embed_and_store to accept topic as parameter
360
- def embed_and_store(text, agent=None, topic=""):
361
- """Store text with associated topic"""
362
- try:
363
- vec = get_embedding(text)
364
- memory_index.add(np.array([vec], dtype='float32'))
365
- memory_data.append({
366
- "text": text,
367
- "timestamp": datetime.now().isoformat(),
368
- "agent": agent or "system",
369
- "topic": topic
370
- })
371
- if len(memory_data) % 5 == 0:
372
- with open(MEMORY_FILE, "wb") as f:
373
- pickle.dump(memory_data, f)
374
- faiss.write_index(memory_index, INDEX_FILE)
375
- except Exception as e:
376
- print(f"Memory Error: {str(e)}")
377
-
378
- # ... [Rest of the functions remain unchanged] ...
379
- def get_last_by_agent(conversation, agent_name):
380
- """Get last message from specific agent"""
381
- for msg in reversed(conversation):
382
- if msg["agent"] == agent_name:
383
- return msg["text"]
384
- return "No message yet"
385
-
386
- def format_conversation_html(conversation):
387
- """Format conversation as HTML"""
388
- html = "<div class='convo-container'>"
389
- for msg in conversation:
390
- agent = msg["agent"]
391
- color_map = {
392
- "Initiator": "#e6f7ff",
393
- "Responder": "#f6ffed",
394
- "Guardian": "#fff7e6",
395
- "Provocateur": "#f9e6ff",
396
- "Cultural": "#e6ffed",
397
- "Judge": "#f0f0f0",
398
- "User": "#f0f0f0"
399
- }
400
- color = color_map.get(agent, "#ffffff")
401
- html += f"""
402
- <div style='background:{color}; padding:10px; margin:10px; border-radius:5px;'>
403
- <b>{agent}:</b> {msg['text']}
404
- </div>
405
- """
406
- html += "</div>"
407
- return html
408
-
409
- def toggle_auto(auto_mode):
410
- """Toggle auto-advance mode"""
411
- new_mode = not auto_mode
412
- return ("🟢 Auto: ON" if new_mode else "🔴 Auto: OFF", new_mode)
413
-
414
- def clear_convo():
415
- """Reset conversation"""
416
- global conversation, turn_count, current_topic, last_ruling_turn
417
- conversation = []
418
- turn_count = 0
419
- current_topic = ""
420
- last_ruling_turn = 0
421
- return (
422
- format_conversation_html([]),
423
- "",
424
- "",
425
- "",
426
- "",
427
- "",
428
- 0,
429
- "💡 Initiator, 🔍 Responder",
430
- [],
431
- 0,
432
- "",
433
- 0,
434
- "",
435
- ""
436
- )
437
-
438
- def new_topic(conversation, turn_count, current_topic):
439
- """Generate a new discussion topic"""
440
- # In a real implementation, this would call an LLM to generate a topic
441
- topics = [
442
- "The Ethics of Genetic Engineering in Humans",
443
- "Universal Basic Income in the Age of Automation",
444
- "Cultural Impacts of Global AI Deployment",
445
- "Privacy vs Security in Digital Societies",
446
- "The Future of Human-AI Collaboration"
447
- ]
448
- new_topic = np.random.choice(topics)
449
- return (
450
- format_conversation_html([]),
451
- new_topic,
452
- 0,
453
- [],
454
- 0,
455
- new_topic
456
- )
457
-
458
- def request_ruling(conversation, current_topic, turn_count, last_ruling_turn):
459
- """Request a ruling from the Judge"""
460
- context = f"Topic: {current_topic}\n\nDiscussion Summary:\n"
461
- for msg in conversation[-5:]:
462
- context += f"- {msg['agent']}: {msg['text']}\n"
463
-
464
- response = safe_chat_completion(
465
- system=JUDGE_PROMPT,
466
- messages=[{"role": "user", "content": f"{context}\nPlease provide a comprehensive ruling."}],
467
- temperature=0.5
468
- )
469
-
470
- new_entry = {
471
- "agent": "Judge",
472
- "text": response,
473
- "turn": turn_count
474
- }
475
- updated_conversation = conversation + [new_entry]
476
- return response, updated_conversation, turn_count
477
-
478
- def run_analysis(conversation):
479
- """Run basic analysis (simplified for free version)"""
480
- # Sentiment analysis placeholder
481
- sentiments = ["Positive", "Neutral", "Negative"]
482
- sentiment_result = np.random.choice(sentiments, p=[0.4, 0.4, 0.2])
483
-
484
- # Topic extraction placeholder
485
- topics = ["AI Ethics", "Policy", "Cultural Impact", "Technology", "Future Scenarios"]
486
- topic_result = ", ".join(np.random.choice(topics, 3, replace=False))
487
-
488
- # Agent participation plot
489
- agents = [msg["agent"] for msg in conversation]
490
- if agents:
491
- agent_counts = {agent: agents.count(agent) for agent in set(agents)}
492
- plt.figure(figsize=(8, 4))
493
- plt.bar(agent_counts.keys(), agent_counts.values())
494
- plt.title("Agent Participation")
495
- plt.ylabel("Number of Messages")
496
- plt.tight_layout()
497
- plt.savefig("agent_plot.png")
498
- plot_path = "agent_plot.png"
499
- else:
500
- plot_path = None
501
-
502
- return (
503
- f"Overall Sentiment: {sentiment_result}",
504
- f"Key Topics: {topic_result}",
505
- plot_path
506
- )
507
-
508
- def generate_knowledge_graph(conversation):
509
- """Generate a simple knowledge graph (placeholder)"""
510
- G = nx.DiGraph()
511
- entities = ["AI", "Ethics", "Society", "Technology", "Future"]
512
- for i, e1 in enumerate(entities):
513
- for j, e2 in enumerate(entities):
514
- if i != j and np.random.random() > 0.7:
515
- G.add_edge(e1, e2, weight=np.random.random())
516
-
517
- plt.figure(figsize=(10, 8))
518
- pos = nx.spring_layout(G)
519
- nx.draw(G, pos, with_labels=True, node_size=2000,
520
- node_color="skyblue", font_size=10,
521
- edge_color="gray", width=1.5)
522
- plt.title("Knowledge Graph")
523
- plt.savefig("knowledge_graph.png")
524
- return "knowledge_graph.png"
525
-
526
- def export_handler(format_radio, conversation, current_topic, turn_count):
527
- """Export conversation in various formats"""
528
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
529
-
530
- if format_radio == "txt":
531
- filename = f"discussion_{timestamp}.txt"
532
- with open(filename, "w") as f:
533
- f.write(f"Topic: {current_topic}\nTurns: {turn_count}\n\n")
534
- for msg in conversation:
535
- f.write(f"{msg['agent']} (Turn {msg.get('turn', 'N/A')}):\n{msg['text']}\n\n")
536
- return filename
537
-
538
- elif format_radio == "pdf":
539
- filename = f"discussion_{timestamp}.pdf"
540
- doc = SimpleDocTemplate(filename, pagesize=letter)
541
- styles = getSampleStyleSheet()
542
- story = []
543
-
544
- story.append(Paragraph(f"Discussion: {current_topic}", styles["Title"]))
545
- story.append(Paragraph(f"Turns: {turn_count}", styles["Normal"]))
546
- story.append(Spacer(1, 12))
547
 
548
- for msg in conversation:
549
- agent_text = f"<b>{msg['agent']}</b> (Turn {msg.get('turn', 'N/A')}):"
550
- story.append(Paragraph(agent_text, styles["Normal"]))
551
- story.append(Paragraph(msg["text"], styles["BodyText"]))
552
- story.append(Spacer(1, 12))
553
 
554
- doc.build(story)
555
- return filename
556
 
557
- elif format_radio == "json":
558
- filename = f"discussion_{timestamp}.json"
559
- data = {
560
- "topic": current_topic,
561
- "turns": turn_count,
562
- "conversation": conversation
563
- }
564
- with open(filename, "w") as f:
565
- json.dump(data, f, indent=2)
566
- return filename
567
-
568
- return "export_error.txt"
569
-
570
- def send_to_webhook(webhook_url, conversation, current_topic, turn_count):
571
- """Send conversation to webhook"""
572
- if not webhook_url.startswith("http"):
573
- return "⚠️ Invalid URL"
574
-
575
- payload = {
576
- "topic": current_topic,
577
- "turns": turn_count,
578
- "conversation": conversation
579
- }
580
-
581
- try:
582
- response = requests.post(webhook_url, json=payload, timeout=10)
583
- if response.status_code == 200:
584
- return "✅ Sent successfully!"
585
- return f"⚠️ Error: {response.status_code} - {response.text}"
586
- except Exception as e:
587
- return f"⚠️ Connection error: {str(e)}"
588
-
589
- def add_user_contribution(user_input, conversation):
590
- """Add user contribution to conversation"""
591
- if not user_input.strip():
592
- return format_conversation_html(conversation), "Please enter a message", conversation
593
-
594
- new_entry = {
595
- "agent": "User",
596
- "text": user_input,
597
- "turn": len(conversation) + 1
598
- }
599
- updated_conversation = conversation + [new_entry]
600
- embed_and_store(user_input, "User")
601
- return format_conversation_html(updated_conversation), "✅ Added your contribution!", updated_conversation
602
-
603
- def update_agent_params(*args):
604
- """Update agent parameters from sliders"""
605
- # Last argument is the current params state
606
- current_params = args[-1]
607
- sliders = args[:-1]
608
-
609
- # Map sliders to agent parameters
610
- agents = ["Initiator", "Responder", "Guardian", "Provocateur", "Cultural", "Judge"]
611
- params = ["creativity", "criticality"]
612
-
613
- updated_params = {}
614
- slider_index = 0
615
- for agent in agents:
616
- updated_params[agent] = {}
617
- for param in params:
618
- updated_params[agent][param] = sliders[slider_index]
619
- slider_index += 1
620
-
621
- return updated_params
622
-
623
  # Custom CSS
624
  demo.css = """
625
  .convo-container {
@@ -671,12 +336,28 @@ def update_agent_params(*args):
671
 
672
  step_btn.click(
673
  step,
674
- inputs=[topic_input, conversation_state, turn_count_state, current_topic_state, last_ruling_turn_state, agent_params_state],
675
  outputs=[
676
- convo_display, intervention_display, outsider_display,
677
- cultural_display, judge_display, topic_display, turn_counter,
678
- agent_status, conversation_state, turn_count_state, current_topic_state,
679
- last_ruling_turn_state
680
  ]
681
  )
682
 
@@ -689,19 +370,35 @@ def update_agent_params(*args):
689
  clear_btn.click(
690
  clear_convo,
691
  outputs=[
692
- convo_display, intervention_display, outsider_display,
693
- cultural_display, judge_display, topic_display, turn_counter,
694
- agent_status, conversation_state, turn_count_state, current_topic_state,
695
- last_ruling_turn_state, overseer_out, judge_out
696
  ]
697
  )
698
 
699
  topic_btn.click(
700
  new_topic,
701
- inputs=[conversation_state, turn_count_state, current_topic_state],
702
  outputs=[
703
- convo_display, topic_display, turn_counter, conversation_state,
704
- turn_count_state, current_topic_state
705
  ]
706
  )
707
 
@@ -741,20 +438,10 @@ def update_agent_params(*args):
741
  outputs=[convo_display, user_feedback, conversation_state]
742
  )
743
 
744
- voting_btn.click(
745
- lambda: "✅ Your vote has been recorded!",
746
- outputs=[user_feedback]
747
- )
748
-
749
- flag_btn.click(
750
- lambda: "🚩 Issue flagged for moderator review",
751
- outputs=[user_feedback]
752
- )
753
-
754
  # Create input list for slider change events
755
  slider_inputs = [agent_sliders[f"{agent}_{param}"]
756
- for agent in ["Initiator", "Responder", "Guardian", "Provocateur", "Cultural", "Judge"]
757
- for param in ["creativity", "critical"]]
758
 
759
  for slider in slider_inputs:
760
  slider.change(
 
188
  print(f"Memory retrieval error: {str(e)}")
189
  return []
190
 
191
+ # ... [Previous code remains unchanged] ...
192
 
193
  # === GRADIO UI ===
194
  with gr.Blocks(theme=gr.themes.Soft(), title="DeepSeek Discussion Platform") as demo:
 
216
  elem_id="convo-display"
217
  )
218
 
219
+ # Placeholder displays for agent-specific content
220
+ intervention_display = gr.Textbox(label="Depth Guardian Intervention", visible=False)
221
+ outsider_display = gr.Textbox(label="Cross-Disciplinary Provocateur", visible=False)
222
+ cultural_display = gr.Textbox(label="Cultural Perspective", visible=False)
223
+ judge_display = gr.Textbox(label="Judge's Ruling", visible=False)
224
+
225
  with gr.Row():
226
  step_btn = gr.Button("▶️ Next Turn", variant="primary")
227
  auto_btn = gr.Button("🔴 Auto: OFF", variant="secondary")
 
237
  with gr.Row():
238
  overseer_out = gr.Textbox(label="Depth Guardian Response", interactive=False)
239
  judge_out = gr.Textbox(label="Judge's Response", interactive=False)
240
+ submit_btn = gr.Button("💬 Add User Contribution", variant="primary")
241
+ user_input = gr.Textbox(label="Your Contribution", placeholder="Add your perspective to the discussion...")
242
+ user_feedback = gr.Textbox(label="Feedback", interactive=False, visible=False)
243
 
244
+ # Analysis tab
245
+ with gr.Tab("Analysis"):
246
+ with gr.Row():
247
+ sentiment_display = gr.Textbox(label="Sentiment Analysis", interactive=False)
248
+ topics_display = gr.Textbox(label="Key Topics", interactive=False)
249
+ agent_plot = gr.Image(label="Agent Participation")
250
+ graph_display = gr.Image(label="Knowledge Graph")
251
+ analysis_btn = gr.Button("📊 Run Analysis", variant="primary")
252
+ graph_btn = gr.Button("🌐 Generate Knowledge Graph", variant="secondary")
253
+
254
+ # Export tab
255
+ with gr.Tab("Export"):
256
+ format_radio = gr.Radio(
257
+ choices=["txt", "pdf", "json"],
258
+ label="Export Format",
259
+ value="txt"
260
+ )
261
+ export_btn = gr.Button("💾 Export Discussion", variant="primary")
262
+ export_result = gr.File(label="Export Result", interactive=False)
263
 
264
+ gr.Markdown("### Webhook Integration")
265
+ webhook_url = gr.Textbox(label="Webhook URL", placeholder="https://your-webhook-endpoint.com")
266
+ integrate_btn = gr.Button("🔌 Send to Webhook", variant="secondary")
267
+ integration_status = gr.Textbox(label="Status", interactive=False)
268
+
269
+ # Agent Configuration tab
270
+ with gr.Tab("Configuration"):
271
+ gr.Markdown("### Agent Parameters")
272
+ agent_sliders = {}
273
+ agents = ["Initiator", "Responder", "Guardian", "Provocateur", "Cultural", "Judge"]
274
+ params = ["creativity", "criticality"]
275
 
276
+ for agent in agents:
277
+ with gr.Accordion(f"{agent} Settings", open=False):
278
+ for param in params:
279
+ slider = gr.Slider(
280
+ minimum=0.0,
281
+ maximum=1.0,
282
+ step=0.05,
283
+ label=f"{agent} {param.capitalize()}",
284
+ value=agent_params[agent][param]
285
+ )
286
+ agent_sliders[f"{agent}_{param}"] = slider
287
 
288
  # Custom CSS
289
  demo.css = """
290
  .convo-container {
 
336
 
337
  step_btn.click(
338
  step,
339
+ inputs=[
340
+ topic_input,
341
+ conversation_state,
342
+ turn_count_state,
343
+ current_topic_state,
344
+ last_ruling_turn_state,
345
+ agent_params_state
346
+ ],
347
  outputs=[
348
+ convo_display,
349
+ intervention_display,
350
+ outsider_display,
351
+ cultural_display,
352
+ judge_display,
353
+ topic_display,
354
+ turn_counter,
355
+ agent_status,
356
+ conversation_state,
357
+ turn_count_state,
358
+ current_topic_state,
359
+ last_ruling_turn_state,
360
+ agent_params_state
361
  ]
362
  )
363
 
370
  clear_btn.click(
371
  clear_convo,
372
  outputs=[
373
+ convo_display,
374
+ intervention_display,
375
+ outsider_display,
376
+ cultural_display,
377
+ judge_display,
378
+ topic_display,
379
+ turn_counter,
380
+ agent_status,
381
+ conversation_state,
382
+ turn_count_state,
383
+ current_topic_state,
384
+ last_ruling_turn_state,
385
+ overseer_out,
386
+ judge_out,
387
+ agent_params_state
388
  ]
389
  )
390
 
391
  topic_btn.click(
392
  new_topic,
393
+ inputs=[conversation_state, turn_count_state, current_topic_state, last_ruling_turn_state],
394
  outputs=[
395
+ convo_display,
396
+ topic_display,
397
+ turn_counter,
398
+ conversation_state,
399
+ turn_count_state,
400
+ current_topic_state,
401
+ last_ruling_turn_state
402
  ]
403
  )
404
 
 
438
  outputs=[convo_display, user_feedback, conversation_state]
439
  )
440
 
441
  # Create input list for slider change events
442
  slider_inputs = [agent_sliders[f"{agent}_{param}"]
443
+ for agent in agents
444
+ for param in params]
445
 
446
  for slider in slider_inputs:
447
  slider.change(