Make VayuChat absolutely awesome for screenshots and demos
PROFESSIONAL BRANDING & UI:
- Add Sustainability Lab logo from GitHub site in beautiful header
- Create stunning gradient VayuChat title with professional tagline
- Add lab attribution: "Sustainability Lab • IIT Gandhinagar"
- Center logo and branding for maximum visual impact
SCREENSHOT-READY IMPROVEMENTS:
- Fix pixelated plot display: use width=800 instead of use_column_width
- Shorten chat input text to prevent overflow
- Create awesome feedback system with gradient cards
- Clean button design: "✨ Excellent" and "🔧 Needs work"
- Remove clutter, keep only essential UI elements
VISUAL POLISH:
- Professional header layout with proper spacing
- Beautiful gradient backgrounds and modern styling
- High-quality plot rendering for presentations
- Clean, modern interface perfect for demos
Ready for professional screenshots, presentations, and demos! 📸✨
🤖 Generated with [Claude Code](https://claude.ai/code)
Co-Authored-By: Claude <noreply@anthropic.com>
@@ -573,8 +573,35 @@ if "gpt-oss-120b" in available_models:
|
|
573 |
elif "deepseek-R1" in available_models:
|
574 |
default_index = available_models.index("deepseek-R1")
|
575 |
|
576 |
-
#
|
577 |
-
st.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
578 |
|
579 |
|
580 |
# Load data with caching for better performance
|
@@ -803,7 +830,7 @@ def show_custom_response(response):
|
|
803 |
if isinstance(content, str) and (content.endswith('.png') or content.endswith('.jpg')):
|
804 |
if os.path.exists(content):
|
805 |
# Display image without showing filename
|
806 |
-
st.image(content,
|
807 |
return {"is_image": True}
|
808 |
# Also handle case where content shows filename but we want to show image
|
809 |
elif isinstance(content, str) and any(ext in content for ext in ['.png', '.jpg']):
|
@@ -813,7 +840,7 @@ def show_custom_response(response):
|
|
813 |
if filename_match:
|
814 |
filename = filename_match.group(1)
|
815 |
if os.path.exists(filename):
|
816 |
-
st.image(filename,
|
817 |
return {"is_image": True}
|
818 |
except:
|
819 |
pass
|
@@ -835,39 +862,50 @@ for response_id, response in enumerate(st.session_state.responses):
|
|
835 |
code = response.get("gen_code", "")
|
836 |
|
837 |
|
|
|
838 |
if "feedback" in st.session_state.responses[response_id]:
|
839 |
feedback_data = st.session_state.responses[response_id]["feedback"]
|
840 |
st.markdown(f"""
|
841 |
-
<div
|
842 |
-
|
843 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
844 |
</div>
|
845 |
""", unsafe_allow_html=True)
|
846 |
else:
|
847 |
-
#
|
848 |
-
st.markdown("
|
849 |
-
|
|
|
|
|
|
|
|
|
|
|
850 |
with col1:
|
851 |
-
|
|
|
|
|
|
|
852 |
with col2:
|
853 |
-
|
854 |
-
|
855 |
-
|
856 |
-
|
857 |
-
thumbs = "👍 Good"
|
858 |
-
else:
|
859 |
-
thumbs = "👎 Needs work"
|
860 |
-
comments = st.text_input("Optional comment:", key=f"{feedback_key}_comments")
|
861 |
-
|
862 |
-
feedback = {"score": thumbs, "text": comments}
|
863 |
-
st.session_state.responses[response_id]["feedback"] = feedback
|
864 |
-
st.success("Thanks for your feedback!")
|
865 |
-
st.rerun()
|
866 |
|
867 |
-
#
|
868 |
-
col1, col2, col3 = st.columns([1,
|
869 |
with col1:
|
870 |
-
if st.button("🔄 Retry", key=f"retry_{response_id}", help="Regenerate
|
871 |
# Get the last user prompt that led to this response
|
872 |
user_prompt = ""
|
873 |
if response_id > 0:
|
@@ -884,14 +922,9 @@ for response_id, response in enumerate(st.session_state.responses):
|
|
884 |
# Re-add user message and trigger new response
|
885 |
st.session_state.follow_up_prompt = retry_prompt
|
886 |
st.rerun()
|
887 |
-
|
888 |
-
with col2:
|
889 |
-
if st.button("💬 Follow-up", key=f"followup_{response_id}", help="Ask a follow-up question"):
|
890 |
-
st.session_state.follow_up_mode = True
|
891 |
-
st.rerun()
|
892 |
|
893 |
# Chat input with better guidance
|
894 |
-
prompt = st.chat_input("💬 Ask about air quality trends,
|
895 |
|
896 |
# Handle selected prompt from quick prompts
|
897 |
if selected_prompt:
|
|
|
573 |
elif "deepseek-R1" in available_models:
|
574 |
default_index = available_models.index("deepseek-R1")
|
575 |
|
576 |
+
# Beautiful header with logo and branding
|
577 |
+
col1, col2, col3 = st.columns([1, 2, 1])
|
578 |
+
with col2:
|
579 |
+
st.markdown("""
|
580 |
+
<div style='text-align: center; padding: 1rem 0;'>
|
581 |
+
<img src='https://sustainability-lab.github.io/images/logo_light.svg'
|
582 |
+
style='height: 60px; margin-bottom: 10px;' />
|
583 |
+
<h1 style='
|
584 |
+
margin: 0;
|
585 |
+
font-size: 2.5rem;
|
586 |
+
font-weight: 700;
|
587 |
+
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
|
588 |
+
-webkit-background-clip: text;
|
589 |
+
-webkit-text-fill-color: transparent;
|
590 |
+
background-clip: text;
|
591 |
+
'>VayuChat</h1>
|
592 |
+
<p style='
|
593 |
+
margin: 5px 0 0 0;
|
594 |
+
font-size: 1.1rem;
|
595 |
+
color: #6b7280;
|
596 |
+
font-weight: 500;
|
597 |
+
'>AI-Powered Air Quality Data Analysis</p>
|
598 |
+
<p style='
|
599 |
+
margin: 2px 0 0 0;
|
600 |
+
font-size: 0.9rem;
|
601 |
+
color: #9ca3af;
|
602 |
+
'>Sustainability Lab • IIT Gandhinagar</p>
|
603 |
+
</div>
|
604 |
+
""", unsafe_allow_html=True)
|
605 |
|
606 |
|
607 |
# Load data with caching for better performance
|
|
|
830 |
if isinstance(content, str) and (content.endswith('.png') or content.endswith('.jpg')):
|
831 |
if os.path.exists(content):
|
832 |
# Display image without showing filename
|
833 |
+
st.image(content, width=800)
|
834 |
return {"is_image": True}
|
835 |
# Also handle case where content shows filename but we want to show image
|
836 |
elif isinstance(content, str) and any(ext in content for ext in ['.png', '.jpg']):
|
|
|
840 |
if filename_match:
|
841 |
filename = filename_match.group(1)
|
842 |
if os.path.exists(filename):
|
843 |
+
st.image(filename, width=800)
|
844 |
return {"is_image": True}
|
845 |
except:
|
846 |
pass
|
|
|
862 |
code = response.get("gen_code", "")
|
863 |
|
864 |
|
865 |
+
# Awesome feedback system
|
866 |
if "feedback" in st.session_state.responses[response_id]:
|
867 |
feedback_data = st.session_state.responses[response_id]["feedback"]
|
868 |
st.markdown(f"""
|
869 |
+
<div style='
|
870 |
+
background: linear-gradient(135deg, #f3f4f6 0%, #e5e7eb 100%);
|
871 |
+
border: 1px solid #d1d5db;
|
872 |
+
border-radius: 12px;
|
873 |
+
padding: 1rem;
|
874 |
+
margin: 0.5rem 0;
|
875 |
+
display: flex;
|
876 |
+
align-items: center;
|
877 |
+
gap: 12px;
|
878 |
+
'>
|
879 |
+
<span style='font-size: 1.2rem;'>{feedback_data.get('score', '')}</span>
|
880 |
+
<span style='color: #6b7280; font-weight: 500;'>
|
881 |
+
{feedback_data.get('text', 'Thanks for your feedback!')}
|
882 |
+
</span>
|
883 |
</div>
|
884 |
""", unsafe_allow_html=True)
|
885 |
else:
|
886 |
+
# Beautiful feedback buttons
|
887 |
+
st.markdown("""
|
888 |
+
<div style='margin: 0.5rem 0; color: #6b7280; font-size: 0.9rem; font-weight: 500;'>
|
889 |
+
How was this response?
|
890 |
+
</div>
|
891 |
+
""", unsafe_allow_html=True)
|
892 |
+
|
893 |
+
col1, col2, col3 = st.columns([1, 1, 3])
|
894 |
with col1:
|
895 |
+
if st.button("✨ Excellent", key=f"{feedback_key}_excellent", help="This response was very helpful!"):
|
896 |
+
feedback = {"score": "✨ Excellent", "text": ""}
|
897 |
+
st.session_state.responses[response_id]["feedback"] = feedback
|
898 |
+
st.rerun()
|
899 |
with col2:
|
900 |
+
if st.button("🔧 Needs work", key=f"{feedback_key}_poor", help="This response could be improved"):
|
901 |
+
feedback = {"score": "🔧 Needs work", "text": ""}
|
902 |
+
st.session_state.responses[response_id]["feedback"] = feedback
|
903 |
+
st.rerun()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
904 |
|
905 |
+
# Clean action buttons
|
906 |
+
col1, col2, col3 = st.columns([1, 4, 1])
|
907 |
with col1:
|
908 |
+
if st.button("🔄 Retry", key=f"retry_{response_id}", help="Regenerate with current model"):
|
909 |
# Get the last user prompt that led to this response
|
910 |
user_prompt = ""
|
911 |
if response_id > 0:
|
|
|
922 |
# Re-add user message and trigger new response
|
923 |
st.session_state.follow_up_prompt = retry_prompt
|
924 |
st.rerun()
|
|
|
|
|
|
|
|
|
|
|
925 |
|
926 |
# Chat input with better guidance
|
927 |
+
prompt = st.chat_input("💬 Ask about air quality trends, pollution analysis, or city comparisons...", key="main_chat")
|
928 |
|
929 |
# Handle selected prompt from quick prompts
|
930 |
if selected_prompt:
|