Nipun Claude committed on
Commit d0f303d · 1 Parent(s): bb0db22

Fix UI issues and improve user experience


Bug fixes:
- Fix collapsible code blocks using native Streamlit expander
- Fix image display to show actual images instead of filenames
- Remove persistent "Data loaded successfully!" message

UI improvements:
- Reduce excessive whitespace and margins for tighter layout
- Move Quick Queries to top of sidebar for better accessibility
- Compact message styling with reduced padding
- Cleaner dataset info section with condensed layout

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <[email protected]>

Files changed (1)
  1. app.py +57 -73
app.py CHANGED
@@ -106,34 +106,24 @@ st.markdown("""
 .user-message {
     background: #3b82f6;
     color: white;
-    padding: 1rem 1.5rem;
+    padding: 0.75rem 1rem;
     border-radius: 12px;
-    margin: 2rem 0;
-    margin-left: auto;
-    margin-right: 0;
     max-width: 70%;
-    display: flex;
-    justify-content: flex-end;
 }

 .user-info {
     font-size: 0.875rem;
     opacity: 0.9;
-    margin-bottom: 5px;
+    margin-bottom: 3px;
 }

 /* Assistant message styling */
 .assistant-message {
     background: #f1f5f9;
     color: #334155;
-    padding: 1rem 1.5rem;
+    padding: 0.75rem 1rem;
     border-radius: 12px;
-    margin: 2rem 0;
-    margin-left: 0;
-    margin-right: auto;
     max-width: 70%;
-    display: flex;
-    justify-content: flex-start;
 }

 .assistant-info {
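For context, the selectors touched in this hunk belong to a CSS block that app.py injects into the page through st.markdown with unsafe_allow_html=True. A minimal standalone sketch of that pattern, using only the class names and the tightened values visible in the diff (the surrounding `<style>` scaffolding and app structure are assumed, not taken from app.py):

```python
# Sketch: injecting a chat-bubble stylesheet into a Streamlit app.
# The class names mirror the diff; everything else is an assumption.
import streamlit as st

CHAT_CSS = """
<style>
.user-message {
    background: #3b82f6;
    color: white;
    padding: 0.75rem 1rem;   /* tightened padding from this commit */
    border-radius: 12px;
    max-width: 70%;
}
.assistant-message {
    background: #f1f5f9;
    color: #334155;
    padding: 0.75rem 1rem;
    border-radius: 12px;
    max-width: 70%;
}
</style>
"""

# unsafe_allow_html=True lets the raw <style> tag reach the rendered page
st.markdown(CHAT_CSS, unsafe_allow_html=True)
```

Dropping the flex and margin rules from the bubbles themselves works because the alignment and vertical spacing are handled by the wrapper divs in show_custom_response (see the later hunks), so the layout logic lives in one place.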
@@ -503,7 +493,7 @@ st.markdown("<hr style='margin: 1rem 0; border: none; border-top: 1px solid #e2e
 # Load data with error handling
 try:
     df = preprocess_and_load_df(join(self_path, "Data.csv"))
-    st.success("Data loaded successfully!")
+    # Data loaded silently - no success message needed
 except Exception as e:
     st.error(f"Error loading data: {e}")
     st.stop()
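The block above is the standard Streamlit fail-fast pattern: load, and if anything goes wrong, report the error and halt the script. A small self-contained sketch of the same flow, with pandas.read_csv standing in for the app's preprocess_and_load_df helper and the working directory assumed:

```python
# Sketch of the fail-fast loading pattern; read_csv is a stand-in for
# the app's own preprocess_and_load_df helper.
import streamlit as st
import pandas as pd
from os.path import join

self_path = "."  # assumption: the app runs from its own directory

try:
    df = pd.read_csv(join(self_path, "Data.csv"))
    # No st.success() banner: the commit removes the persistent message.
except Exception as e:
    st.error(f"Error loading data: {e}")
    st.stop()  # stop the script so nothing below runs without data
```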
@@ -511,43 +501,9 @@ except Exception as e:
 inference_server = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
 image_path = "IITGN_Logo.png"

-# Clean sidebar
+# Clean sidebar
 with st.sidebar:
-    # Dataset Info Section
-    st.markdown("### Dataset Info")
-    st.markdown("""
-    <div style='background-color: #f1f5f9; padding: 1rem; border-radius: 8px; margin-bottom: 1.5rem;'>
-        <h4 style='margin: 0 0 0.5rem 0; color: #1e293b; font-size: 1rem;'>PM2.5 Air Quality Data</h4>
-        <p style='margin: 0.25rem 0; font-size: 0.875rem;'><strong>Time Range:</strong> Daily measurements</p>
-        <p style='margin: 0.25rem 0; font-size: 0.875rem;'><strong>Locations:</strong> Multiple cities in Gujarat</p>
-        <p style='margin: 0.25rem 0; font-size: 0.875rem;'><strong>Records:</strong> Air quality monitoring data</p>
-        <p style='margin: 0.25rem 0; font-size: 0.875rem;'><strong>Parameters:</strong> PM2.5, PM10, Location data</p>
-    </div>
-    """, unsafe_allow_html=True)
-
-    # Current Model Info
-    st.markdown("### Current Model")
-    st.markdown(f"**{model_name}**")
-
-    model_descriptions = {
-        "llama3.1": "Fast and efficient for general queries",
-        "llama3.3": "Most advanced LLaMA model for complex reasoning",
-        "mistral": "Balanced performance and speed",
-        "gemma": "Google's lightweight model",
-        "gemini-pro": "Google's most powerful model",
-        "gpt-oss-20b": "OpenAI's compact open-weight GPT for everyday tasks",
-        "gpt-oss-120b": "OpenAI's massive open-weight GPT for nuanced responses",
-        "deepseek-R1": "DeepSeek's distilled LLaMA model for efficient reasoning",
-        "llama4 maverik": "Meta's LLaMA 4 Maverick — high-performance instruction model",
-        "llama4 scout": "Meta's LLaMA 4 Scout — optimized for adaptive reasoning"
-    }
-
-    if model_name in model_descriptions:
-        st.caption(model_descriptions[model_name])
-
-    st.markdown("---")
-
-    # Quick Queries Section
+    # Quick Queries Section - moved to top
     st.markdown("### Quick Queries")

     # Load quick prompts
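Streamlit renders sidebar elements in declaration order, so removing the Dataset Info and Current Model blocks here (and re-adding them in the next hunk) is what moves Quick Queries to the top. A rough sketch of the reordered sidebar, with a placeholder prompt list since the app's quick_prompts source is not part of this diff:

```python
# Sketch of the reordered sidebar: widgets render top-down in the order
# they are declared, so Quick Queries now comes first.
import streamlit as st

quick_prompts = [
    "Which city had the highest PM2.5 last month?",
    "Plot the monthly average PM10 trend",
]  # hypothetical examples; the real list is loaded elsewhere in app.py

with st.sidebar:
    st.markdown("### Quick Queries")
    for prompt in quick_prompts:
        if st.button(prompt, use_container_width=True, key=prompt):
            st.session_state["pending_query"] = prompt  # assumed handoff to the chat input

    st.markdown("---")
    # Dataset Info and Current Model sections follow, as in the next hunk
```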
@@ -585,6 +541,38 @@ with st.sidebar:

     st.markdown("---")

+    # Dataset Info Section
+    st.markdown("### Dataset Info")
+    st.markdown("""
+    <div style='background-color: #f1f5f9; padding: 0.75rem; border-radius: 6px; margin-bottom: 1rem;'>
+        <h4 style='margin: 0 0 0.25rem 0; color: #1e293b; font-size: 0.9rem;'>PM2.5 Air Quality Data</h4>
+        <p style='margin: 0.125rem 0; font-size: 0.75rem;'><strong>Locations:</strong> Gujarat cities</p>
+        <p style='margin: 0.125rem 0; font-size: 0.75rem;'><strong>Parameters:</strong> PM2.5, PM10</p>
+    </div>
+    """, unsafe_allow_html=True)
+
+    # Current Model Info
+    st.markdown("### Current Model")
+    st.markdown(f"**{model_name}**")
+
+    model_descriptions = {
+        "llama3.1": "Fast and efficient for general queries",
+        "llama3.3": "Most advanced LLaMA model for complex reasoning",
+        "mistral": "Balanced performance and speed",
+        "gemma": "Google's lightweight model",
+        "gemini-pro": "Google's most powerful model",
+        "gpt-oss-20b": "OpenAI's compact open-weight GPT for everyday tasks",
+        "gpt-oss-120b": "OpenAI's massive open-weight GPT for nuanced responses",
+        "deepseek-R1": "DeepSeek's distilled LLaMA model for efficient reasoning",
+        "llama4 maverik": "Meta's LLaMA 4 Maverick — high-performance instruction model",
+        "llama4 scout": "Meta's LLaMA 4 Scout — optimized for adaptive reasoning"
+    }
+
+    if model_name in model_descriptions:
+        st.caption(model_descriptions[model_name])
+
+    st.markdown("---")
+
     # Clear Chat Button
     if st.button("Clear Chat", use_container_width=True):
         st.session_state.responses = []
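The re-added block is a plain dictionary lookup feeding st.caption, followed by the existing Clear Chat button. A compact sketch of the same logic, assuming model_name is chosen by a selector widget elsewhere in the sidebar (that widget is not part of this diff, and the mapping below is a trimmed copy):

```python
# Sketch of the model blurb lookup and chat reset at the bottom of the sidebar.
import streamlit as st

model_name = st.session_state.get("model_name", "mistral")  # assumed: set by a selector widget

model_descriptions = {
    "mistral": "Balanced performance and speed",
    "llama3.1": "Fast and efficient for general queries",
}  # trimmed copy of the mapping shown in the diff

st.markdown("### Current Model")
st.markdown(f"**{model_name}**")
if model_name in model_descriptions:
    st.caption(model_descriptions[model_name])  # short blurb under the model name

st.markdown("---")
if st.button("Clear Chat", use_container_width=True):
    st.session_state.responses = []  # drop the stored conversation history
```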
@@ -609,18 +597,18 @@ def show_custom_response(response):
     content = response.get("content", "")

     if role == "user":
-        # User message with right alignment
+        # User message with right alignment - reduced margins
         st.markdown(f"""
-        <div style='display: flex; justify-content: flex-end; margin: 2rem 0;'>
+        <div style='display: flex; justify-content: flex-end; margin: 1rem 0;'>
             <div class='user-message'>
                 {content}
             </div>
         </div>
         """, unsafe_allow_html=True)
     elif role == "assistant":
-        # Assistant message with left alignment
+        # Assistant message with left alignment - reduced margins
         st.markdown(f"""
-        <div style='display: flex; justify-content: flex-start; margin: 2rem 0;'>
+        <div style='display: flex; justify-content: flex-start; margin: 1rem 0;'>
             <div class='assistant-message'>
                 <div class='assistant-info'>VayuChat</div>
                 {content if isinstance(content, str) else str(content)}
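After this change the two branches differ only in the flex alignment and the CSS class, with the vertical margin trimmed from 2rem to 1rem. A simplified helper illustrating the same dispatch; render_message is a stand-in name for this sketch, not the app's function:

```python
# Sketch of the role dispatch: each message sits in a flex wrapper whose
# justify-content picks the side, with the tighter 1rem vertical margin.
import streamlit as st

def render_message(role: str, content: str) -> None:
    side = "flex-end" if role == "user" else "flex-start"
    css_class = "user-message" if role == "user" else "assistant-message"
    st.markdown(
        f"""
        <div style='display: flex; justify-content: {side}; margin: 1rem 0;'>
            <div class='{css_class}'>{content}</div>
        </div>
        """,
        unsafe_allow_html=True,
    )
```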
@@ -628,32 +616,28 @@ def show_custom_response(response):
         </div>
         """, unsafe_allow_html=True)

-        # Show generated code with collapsible container
+        # Show generated code with Streamlit expander
         if response.get("gen_code"):
-            st.markdown("""
-            <div class='code-container'>
-                <div class='code-header' onclick='toggleCode(this)'>
-                    <div class='code-title'>Generated Python Code</div>
-                    <div class='toggle-text'>Click to expand</div>
-                </div>
-                <div class='code-block' style='display: none;'>
-            """, unsafe_allow_html=True)
-
-            st.code(response["gen_code"], language="python")
-
-            st.markdown("</div></div>", unsafe_allow_html=True)
+            with st.expander("📋 View Generated Code", expanded=False):
+                st.code(response["gen_code"], language="python")

         # Try to display image if content is a file path
         try:
             if isinstance(content, str) and (content.endswith('.png') or content.endswith('.jpg')):
                 if os.path.exists(content):
-                    # Chart container styling
-                    st.markdown("""
-                    <div style='background: white; border: 1px solid #e2e8f0; border-radius: 8px; padding: 1.5rem; margin: 1rem 0;'>
-                    """, unsafe_allow_html=True)
-                    st.image(content)
-                    st.markdown("</div>", unsafe_allow_html=True)
+                    # Display image without showing filename
+                    st.image(content, use_column_width=True)
                     return {"is_image": True}
+            # Also handle case where content shows filename but we want to show image
+            elif isinstance(content, str) and any(ext in content for ext in ['.png', '.jpg']):
+                # Extract potential filename from content
+                import re
+                filename_match = re.search(r'([^/\\]+\.(?:png|jpg|jpeg))', content)
+                if filename_match:
+                    filename = filename_match.group(1)
+                    if os.path.exists(filename):
+                        st.image(filename, use_column_width=True)
+                        return {"is_image": True}
         except:
             pass
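Two replacements happen in this hunk: the hand-rolled HTML/JS toggle gives way to a native st.expander, and a regex fallback pulls an image filename out of answers that merely mention a file instead of being a bare path. A self-contained sketch of both behaviours, with illustrative sample values standing in for the response fields:

```python
# Sketch of the expander for generated code and the image-path fallback.
# gen_code and content are sample stand-ins for response["gen_code"] and
# the assistant's answer text.
import os
import re
import streamlit as st

gen_code = "df.groupby('city')['PM2.5'].mean()"          # stand-in generated code
content = "Saved the chart to pm25_trend.png for you."    # stand-in answer text

# Native collapsible block: no custom CSS or onclick JavaScript needed.
with st.expander("📋 View Generated Code", expanded=False):
    st.code(gen_code, language="python")

# Direct case: the content itself is a path to an image file.
if content.endswith((".png", ".jpg")) and os.path.exists(content):
    st.image(content, use_column_width=True)
# Fallback: the content only mentions an image file somewhere in the text.
else:
    match = re.search(r'([^/\\]+\.(?:png|jpg|jpeg))', content)
    if match and os.path.exists(match.group(1)):
        st.image(match.group(1), use_column_width=True)
```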