Ali2206 committed on
Commit
fcebf54
·
verified ·
1 Parent(s): 0a3f912

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +347 -39
app.py CHANGED
@@ -43,11 +43,14 @@ os.environ.update({
43
  "TOKENIZERS_PARALLELISM": "false",
44
  "CUDA_LAUNCH_BLOCKING": "1"
45
  })
 
 
46
  current_dir = os.path.dirname(os.path.abspath(__file__))
47
  src_path = os.path.abspath(os.path.join(current_dir, "src"))
48
  sys.path.insert(0, src_path)
49
 
50
  from txagent.txagent import TxAgent
 
51
  # ==================== CORE COMPONENTS ====================
52
  class FileProcessor:
53
  """Handles all file processing operations"""
@@ -294,7 +297,7 @@ class ClinicalAgent:
294
  full_response += cleaned + " "
295
  yield {"role": "assistant", "content": full_response}
296
 
297
- def analyze_records(self, message: str, history: List[Dict], files: List) -> Generator[Dict[str, Any], None, None]:
298
  """Main analysis workflow"""
299
  outputs = {
300
  "chatbot": history.copy(),
@@ -302,13 +305,13 @@ class ClinicalAgent:
302
  "final_summary": "",
303
  "progress": {"value": "Initializing...", "visible": True}
304
  }
305
- yield outputs
306
 
307
  try:
308
  # Add user message
309
  history.append({"role": "user", "content": message})
310
  outputs["chatbot"] = history
311
- yield outputs
312
 
313
  # Process files
314
  extracted = []
@@ -329,7 +332,7 @@ class ClinicalAgent:
329
  try:
330
  extracted.extend(future.result())
331
  outputs["progress"] = self._format_progress(i, len(files), "Processing files")
332
- yield outputs
333
  except Exception as e:
334
  logger.error(f"File processing failed: {e}")
335
  extracted.append({"error": str(e)})
@@ -342,7 +345,7 @@ class ClinicalAgent:
342
  "chatbot": history,
343
  "progress": self._format_progress(len(files), len(files), "Files processed")
344
  })
345
- yield outputs
346
 
347
  # Analyze content
348
  text_content = "\n".join(json.dumps(item) for item in extracted)
@@ -365,7 +368,7 @@ Document Excerpt (Part {idx}/{len(chunks)}):
365
  "chatbot": history,
366
  "progress": self._format_progress(idx, len(chunks), "Analyzing")
367
  })
368
- yield outputs
369
 
370
  # Stream analysis
371
  chunk_response = ""
@@ -376,7 +379,7 @@ Document Excerpt (Part {idx}/{len(chunks)}):
376
  "chatbot": history,
377
  "progress": self._format_progress(idx, len(chunks), "Analyzing")
378
  })
379
- yield outputs
380
 
381
  full_analysis += f"--- Analysis Part {idx} ---\n{chunk_response}\n"
382
  torch.cuda.empty_cache()
@@ -395,7 +398,7 @@ Document Excerpt (Part {idx}/{len(chunks)}):
395
  "final_summary": summary,
396
  "progress": {"visible": False}
397
  })
398
- yield outputs
399
 
400
  except Exception as e:
401
  logger.error(f"Analysis failed: {e}")
@@ -405,7 +408,7 @@ Document Excerpt (Part {idx}/{len(chunks)}):
405
  "final_summary": f"Error: {str(e)}",
406
  "progress": {"visible": False}
407
  })
408
- yield outputs
409
 
410
  def _format_progress(self, current: int, total: int, stage: str = "") -> Dict[str, Any]:
411
  """Format progress update for UI"""
@@ -414,6 +417,315 @@ Document Excerpt (Part {idx}/{len(chunks)}):
414
 
415
  def create_interface(self) -> gr.Blocks:
416
  """Build the Gradio interface"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
417
  with gr.Blocks(
418
  theme=gr.themes.Soft(
419
  primary_hue="indigo",
@@ -421,30 +733,13 @@ Document Excerpt (Part {idx}/{len(chunks)}):
421
  neutral_hue="slate"
422
  ),
423
  title="Clinical Oversight Assistant",
424
- css="""
425
- .summary-panel {
426
- border-left: 4px solid #4f46e5;
427
- padding: 16px;
428
- background: #f8fafc;
429
- border-radius: 8px;
430
- margin-bottom: 16px;
431
- }
432
- .upload-area {
433
- border: 2px dashed #cbd5e1;
434
- border-radius: 8px;
435
- padding: 24px;
436
- margin: 12px 0;
437
- }
438
- .chat-container {
439
- border-radius: 8px;
440
- border: 1px solid #e2e8f0;
441
- }
442
- """
443
  ) as app:
444
  # Header
445
  gr.Markdown("""
446
  <div style='text-align: center; margin-bottom: 24px;'>
447
- <h1 style='color: #4f46e5; margin-bottom: 8px;'>🩺 Clinical Oversight Assistant</h1>
448
  <p style='color: #64748b;'>
449
  AI-powered analysis for identifying potential missed diagnoses in patient records
450
  </p>
@@ -454,7 +749,9 @@ Document Excerpt (Part {idx}/{len(chunks)}):
454
  with gr.Row(equal_height=False):
455
  # Main Chat Panel
456
  with gr.Column(scale=3):
457
- gr.Markdown("**Clinical Analysis Conversation**")
 
 
458
  chatbot = gr.Chatbot(
459
  label="",
460
  height=650,
@@ -471,14 +768,18 @@ Document Excerpt (Part {idx}/{len(chunks)}):
471
  # Results Panel
472
  with gr.Column(scale=1):
473
  with gr.Group():
474
- gr.Markdown("**Clinical Summary**")
 
 
475
  final_summary = gr.Markdown(
476
- "Analysis results will appear here...",
477
  elem_classes=["summary-panel"]
478
  )
479
 
480
  with gr.Group():
481
- gr.Markdown("**Report Export**")
 
 
482
  download_output = gr.File(
483
  label="Download Full Analysis",
484
  visible=False,
@@ -491,29 +792,36 @@ Document Excerpt (Part {idx}/{len(chunks)}):
491
  file_types=[".pdf", ".csv", ".xls", ".xlsx"],
492
  file_count="multiple",
493
  label="Upload Patient Records",
494
- elem_classes=["upload-area"]
 
495
  )
496
 
497
- with gr.Row():
498
  user_input = gr.Textbox(
499
  placeholder="Enter your clinical query or analysis request...",
500
  show_label=False,
501
  container=False,
502
  scale=7,
503
- autofocus=True
 
 
504
  )
505
  submit_btn = gr.Button(
506
  "Analyze",
507
  variant="primary",
508
  scale=1,
509
- min_width=120
 
 
510
  )
511
 
512
  # Hidden progress tracker
513
  progress_tracker = gr.Textbox(
514
  label="Analysis Progress",
515
  visible=False,
516
- interactive=False
 
 
517
  )
518
 
519
  # Event handlers
@@ -532,7 +840,7 @@ Document Excerpt (Part {idx}/{len(chunks)}):
532
  )
533
 
534
  app.load(
535
- lambda: [[], None, "", "", None, {"visible": False}],
536
  outputs=[chatbot, download_output, final_summary, user_input, file_upload, progress_tracker],
537
  queue=False
538
  )
 
43
  "TOKENIZERS_PARALLELISM": "false",
44
  "CUDA_LAUNCH_BLOCKING": "1"
45
  })
46
+
47
+ # Add src path for txagent
48
  current_dir = os.path.dirname(os.path.abspath(__file__))
49
  src_path = os.path.abspath(os.path.join(current_dir, "src"))
50
  sys.path.insert(0, src_path)
51
 
52
  from txagent.txagent import TxAgent
53
+
54
  # ==================== CORE COMPONENTS ====================
55
  class FileProcessor:
56
  """Handles all file processing operations"""
 
297
  full_response += cleaned + " "
298
  yield {"role": "assistant", "content": full_response}
299
 
300
+ def analyze_records(self, message: str, history: List[Dict], files: List) -> Generator[tuple, None, None]:
301
  """Main analysis workflow"""
302
  outputs = {
303
  "chatbot": history.copy(),
 
305
  "final_summary": "",
306
  "progress": {"value": "Initializing...", "visible": True}
307
  }
308
+ yield (outputs["chatbot"], outputs["download_output"], outputs["final_summary"], outputs["progress"])
309
 
310
  try:
311
  # Add user message
312
  history.append({"role": "user", "content": message})
313
  outputs["chatbot"] = history
314
+ yield (outputs["chatbot"], outputs["download_output"], outputs["final_summary"], outputs["progress"])
315
 
316
  # Process files
317
  extracted = []
 
332
  try:
333
  extracted.extend(future.result())
334
  outputs["progress"] = self._format_progress(i, len(files), "Processing files")
335
+ yield (outputs["chatbot"], outputs["download_output"], outputs["final_summary"], outputs["progress"])
336
  except Exception as e:
337
  logger.error(f"File processing failed: {e}")
338
  extracted.append({"error": str(e)})
 
345
  "chatbot": history,
346
  "progress": self._format_progress(len(files), len(files), "Files processed")
347
  })
348
+ yield (outputs["chatbot"], outputs["download_output"], outputs["final_summary"], outputs["progress"])
349
 
350
  # Analyze content
351
  text_content = "\n".join(json.dumps(item) for item in extracted)
 
368
  "chatbot": history,
369
  "progress": self._format_progress(idx, len(chunks), "Analyzing")
370
  })
371
+ yield (outputs["chatbot"], outputs["download_output"], outputs["final_summary"], outputs["progress"])
372
 
373
  # Stream analysis
374
  chunk_response = ""
 
379
  "chatbot": history,
380
  "progress": self._format_progress(idx, len(chunks), "Analyzing")
381
  })
382
+ yield (outputs["chatbot"], outputs["download_output"], outputs["final_summary"], outputs["progress"])
383
 
384
  full_analysis += f"--- Analysis Part {idx} ---\n{chunk_response}\n"
385
  torch.cuda.empty_cache()
 
398
  "final_summary": summary,
399
  "progress": {"visible": False}
400
  })
401
+ yield (outputs["chatbot"], outputs["download_output"], outputs["final_summary"], outputs["progress"])
402
 
403
  except Exception as e:
404
  logger.error(f"Analysis failed: {e}")
 
408
  "final_summary": f"Error: {str(e)}",
409
  "progress": {"visible": False}
410
  })
411
+ yield (outputs["chatbot"], outputs["download_output"], outputs["final_summary"], outputs["progress"])
412
 
413
  def _format_progress(self, current: int, total: int, stage: str = "") -> Dict[str, Any]:
414
  """Format progress update for UI"""
 
417
 
418
  def create_interface(self) -> gr.Blocks:
419
  """Build the Gradio interface"""
420
+ css = """
421
+ /* ==================== BASE STYLES ==================== */
422
+ :root {
423
+ --primary-color: #4f46e5;
424
+ --primary-dark: #4338ca;
425
+ --border-radius: 8px;
426
+ --transition: all 0.3s ease;
427
+ --shadow: 0 4px 12px rgba(0,0,0,0.1);
428
+ --font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
429
+ --background: #ffffff;
430
+ --text-color: #1e293b;
431
+ --chat-bg: #f8fafc;
432
+ --message-bg: #e2e8f0;
433
+ --panel-bg: rgba(248, 250, 252, 0.9);
434
+ --panel-dark-bg: rgba(30, 41, 59, 0.9);
435
+ }
436
+
437
+ [data-theme="dark"] {
438
+ --background: #1e2a44;
439
+ --text-color: #f1f5f9;
440
+ --chat-bg: #2d3b55;
441
+ --message-bg: #475569;
442
+ --panel-bg: var(--panel-dark-bg);
443
+ }
444
+
445
+ body, .gradio-container {
446
+ font-family: var(--font-family);
447
+ background: var(--background);
448
+ color: var(--text-color);
449
+ margin: 0;
450
+ padding: 0;
451
+ transition: var(--transition);
452
+ }
453
+
454
+ /* ==================== LAYOUT ==================== */
455
+ .gradio-container {
456
+ max-width: 1200px;
457
+ margin: 0 auto;
458
+ padding: 1.5rem;
459
+ display: flex;
460
+ flex-direction: column;
461
+ gap: 1.5rem;
462
+ }
463
+
464
+ .chat-container {
465
+ background: var(--chat-bg);
466
+ border-radius: var(--border-radius);
467
+ border: 1px solid #e2e8f0;
468
+ padding: 1.5rem;
469
+ min-height: 50vh;
470
+ max-height: 80vh;
471
+ overflow-y: auto;
472
+ box-shadow: var(--shadow);
473
+ margin-bottom: 4rem;
474
+ }
475
+
476
+ .summary-panel {
477
+ background: var(--panel-bg);
478
+ border-left: 4px solid var(--primary-color);
479
+ padding: 1rem;
480
+ border-radius: var(--border-radius);
481
+ margin-bottom: 1rem;
482
+ box-shadow: var(--shadow);
483
+ backdrop-filter: blur(8px);
484
+ }
485
+
486
+ .upload-area {
487
+ border: 2px dashed #cbd5e1;
488
+ border-radius: var(--border-radius);
489
+ padding: 1.5rem;
490
+ margin: 0.75rem 0;
491
+ transition: var(--transition);
492
+ }
493
+
494
+ .upload-area:hover {
495
+ border-color: var(--primary-color);
496
+ background: rgba(79, 70, 229, 0.05);
497
+ }
498
+
499
+ /* ==================== COMPONENTS ==================== */
500
+ .chat__message {
501
+ margin: 0.75rem 0;
502
+ padding: 0.75rem 1rem;
503
+ border-radius: var(--border-radius);
504
+ max-width: 85%;
505
+ transition: var(--transition);
506
+ background: var(--message-bg);
507
+ border: 1px solid rgba(0,0,0,0.05);
508
+ animation: messageFade 0.3s ease;
509
+ }
510
+
511
+ .chat__message:hover {
512
+ transform: translateY(-2px);
513
+ box-shadow: 0 2px 4px rgba(0,0,0,0.1);
514
+ }
515
+
516
+ .chat__message.user {
517
+ background: linear-gradient(135deg, var(--primary-color), var(--primary-dark));
518
+ color: white;
519
+ margin-left: auto;
520
+ }
521
+
522
+ .chat__message.assistant {
523
+ background: var(--message-bg);
524
+ color: var(--text-color);
525
+ }
526
+
527
+ .input-container {
528
+ display: flex;
529
+ align-items: center;
530
+ gap: 0.75rem;
531
+ background: var(--chat-bg);
532
+ padding: 0.75rem 1rem;
533
+ border-radius: 1.5rem;
534
+ box-shadow: var(--shadow);
535
+ position: sticky;
536
+ bottom: 1rem;
537
+ z-index: 10;
538
+ }
539
+
540
+ .input__textbox {
541
+ flex-grow: 1;
542
+ border: none;
543
+ background: transparent;
544
+ color: var(--text-color);
545
+ outline: none;
546
+ font-size: 1rem;
547
+ }
548
+
549
+ .input__textbox:focus {
550
+ border-bottom: 2px solid var(--primary-color);
551
+ }
552
+
553
+ .submit-btn {
554
+ background: linear-gradient(135deg, var(--primary-color), var(--primary-dark));
555
+ color: white;
556
+ border: none;
557
+ border-radius: 1rem;
558
+ padding: 0.5rem 1.25rem;
559
+ font-size: 0.9rem;
560
+ transition: var(--transition);
561
+ }
562
+
563
+ .submit-btn:hover {
564
+ transform: scale(1.05);
565
+ }
566
+
567
+ .submit-btn:active {
568
+ animation: glow 0.3s ease;
569
+ }
570
+
571
+ .tooltip {
572
+ position: relative;
573
+ }
574
+
575
+ .tooltip:hover::after {
576
+ content: attr(data-tip);
577
+ position: absolute;
578
+ top: -2.5rem;
579
+ left: 50%;
580
+ transform: translateX(-50%);
581
+ background: #1e293b;
582
+ color: white;
583
+ padding: 0.4rem 0.8rem;
584
+ border-radius: 0.4rem;
585
+ font-size: 0.85rem;
586
+ max-width: 200px;
587
+ white-space: normal;
588
+ text-align: center;
589
+ z-index: 1000;
590
+ animation: fadeIn 0.3s ease;
591
+ }
592
+
593
+ .progress-tracker {
594
+ position: relative;
595
+ padding: 0.5rem;
596
+ background: var(--message-bg);
597
+ border-radius: var(--border-radius);
598
+ margin-top: 0.75rem;
599
+ overflow: hidden;
600
+ }
601
+
602
+ .progress-tracker::before {
603
+ content: '';
604
+ position: absolute;
605
+ top: 0;
606
+ left: 0;
607
+ height: 100%;
608
+ width: 0;
609
+ background: linear-gradient(to right, var(--primary-color), var(--primary-dark));
610
+ opacity: 0.3;
611
+ animation: progress 2s ease-in-out infinite;
612
+ }
613
+
614
+ /* ==================== ANIMATIONS ==================== */
615
+ @keyframes glow {
616
+ 0%, 100% { transform: scale(1); opacity: 1; }
617
+ 50% { transform: scale(1.1); opacity: 0.8; }
618
+ }
619
+
620
+ @keyframes fadeIn {
621
+ from { opacity: 0; }
622
+ to { opacity: 1; }
623
+ }
624
+
625
+ @keyframes messageFade {
626
+ from { opacity: 0; transform: translateY(10px) scale(0.95); }
627
+ to { opacity: 1; transform: translateY(0) scale(1); }
628
+ }
629
+
630
+ @keyframes progress {
631
+ 0% { width: 0; }
632
+ 50% { width: 60%; }
633
+ 100% { width: 0; }
634
+ }
635
+
636
+ /* ==================== THEMES ==================== */
637
+ [data-theme="dark"] .chat-container {
638
+ border-color: #475569;
639
+ }
640
+
641
+ [data-theme="dark"] .upload-area {
642
+ border-color: #64748b;
643
+ }
644
+
645
+ [data-theme="dark"] .upload-area:hover {
646
+ background: rgba(79, 70, 229, 0.1);
647
+ }
648
+
649
+ [data-theme="dark"] .summary-panel {
650
+ border-left-color: #818cf8;
651
+ }
652
+
653
+ /* ==================== MEDIA QUERIES ==================== */
654
+ @media (max-width: 768px) {
655
+ .gradio-container {
656
+ padding: 1rem;
657
+ }
658
+
659
+ .chat-container {
660
+ min-height: 40vh;
661
+ max-height: 70vh;
662
+ margin-bottom: 3.5rem;
663
+ }
664
+
665
+ .summary-panel {
666
+ padding: 0.75rem;
667
+ }
668
+
669
+ .upload-area {
670
+ padding: 1rem;
671
+ }
672
+
673
+ .input-container {
674
+ gap: 0.5rem;
675
+ padding: 0.5rem;
676
+ }
677
+
678
+ .submit-btn {
679
+ padding: 0.4rem 1rem;
680
+ }
681
+ }
682
+
683
+ @media (max-width: 480px) {
684
+ .chat-container {
685
+ padding: 1rem;
686
+ margin-bottom: 3rem;
687
+ }
688
+
689
+ .input-container {
690
+ flex-direction: column;
691
+ padding: 0.5rem;
692
+ }
693
+
694
+ .input__textbox {
695
+ font-size: 0.9rem;
696
+ }
697
+
698
+ .submit-btn {
699
+ width: 100%;
700
+ padding: 0.5rem;
701
+ font-size: 0.85rem;
702
+ }
703
+
704
+ .chat__message {
705
+ max-width: 90%;
706
+ padding: 0.5rem 0.75rem;
707
+ }
708
+
709
+ .tooltip:hover::after {
710
+ top: auto;
711
+ bottom: -2.5rem;
712
+ max-width: 80vw;
713
+ }
714
+ }
715
+ """
716
+
717
+ js = """
718
+ function applyTheme(theme) {
719
+ document.documentElement.setAttribute('data-theme', theme);
720
+ localStorage.setItem('theme', theme);
721
+ }
722
+
723
+ document.addEventListener('DOMContentLoaded', () => {
724
+ const savedTheme = localStorage.getItem('theme') || 'light';
725
+ applyTheme(savedTheme);
726
+ });
727
+ """
728
+
729
  with gr.Blocks(
730
  theme=gr.themes.Soft(
731
  primary_hue="indigo",
 
733
  neutral_hue="slate"
734
  ),
735
  title="Clinical Oversight Assistant",
736
+ css=css,
737
+ js=js
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
738
  ) as app:
739
  # Header
740
  gr.Markdown("""
741
  <div style='text-align: center; margin-bottom: 24px;'>
742
+ <h1 style='color: var(--primary-color); margin-bottom: 8px;'>🩺 Clinical Oversight Assistant</h1>
743
  <p style='color: #64748b;'>
744
  AI-powered analysis for identifying potential missed diagnoses in patient records
745
  </p>
 
749
  with gr.Row(equal_height=False):
750
  # Main Chat Panel
751
  with gr.Column(scale=3):
752
+ gr.Markdown(
753
+ "<div class='tooltip' data-tip='View conversation history'>**Clinical Analysis Conversation**</div>"
754
+ )
755
  chatbot = gr.Chatbot(
756
  label="",
757
  height=650,
 
768
  # Results Panel
769
  with gr.Column(scale=1):
770
  with gr.Group():
771
+ gr.Markdown(
772
+ "<div class='tooltip' data-tip='Summary of findings'>**Clinical Summary**</div>"
773
+ )
774
  final_summary = gr.Markdown(
775
+ "<div class='tooltip' data-tip='Analysis results'>Analysis results will appear here...</div>",
776
  elem_classes=["summary-panel"]
777
  )
778
 
779
  with gr.Group():
780
+ gr.Markdown(
781
+ "<div class='tooltip' data-tip='Download report'>**Report Export**</div>"
782
+ )
783
  download_output = gr.File(
784
  label="Download Full Analysis",
785
  visible=False,
 
792
  file_types=[".pdf", ".csv", ".xls", ".xlsx"],
793
  file_count="multiple",
794
  label="Upload Patient Records",
795
+ elem_classes=["upload-area"],
796
+ elem_id="file-upload"
797
  )
798
 
799
+ with gr.Row(elem_classes=["input-container"]):
800
  user_input = gr.Textbox(
801
  placeholder="Enter your clinical query or analysis request...",
802
  show_label=False,
803
  container=False,
804
  scale=7,
805
+ autofocus=True,
806
+ elem_classes=["input__textbox"],
807
+ elem_id="user-input"
808
  )
809
  submit_btn = gr.Button(
810
  "Analyze",
811
  variant="primary",
812
  scale=1,
813
+ min_width=120,
814
+ elem_classes=["submit-btn"],
815
+ elem_id="submit-btn"
816
  )
817
 
818
  # Hidden progress tracker
819
  progress_tracker = gr.Textbox(
820
  label="Analysis Progress",
821
  visible=False,
822
+ interactive=False,
823
+ elem_classes=["progress-tracker"],
824
+ elem_id="progress-tracker"
825
  )
826
 
827
  # Event handlers
 
840
  )
841
 
842
  app.load(
843
+ lambda: [[], None, "<div class='tooltip' data-tip='Analysis results'>Analysis results will appear here...</div>", "", None, {"visible": False}],
844
  outputs=[chatbot, download_output, final_summary, user_input, file_upload, progress_tracker],
845
  queue=False
846
  )