import streamlit as st
import os
import json
import pandas as pd
import random
from datetime import datetime
from os.path import join
from src import (
    preprocess_and_load_df,
    get_from_user,
    ask_question,
)
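# Helpers from src/ (as used below): preprocess_and_load_df loads Data.csv into
# a DataFrame, get_from_user wraps a user prompt as a chat-history entry, and
# ask_question runs the selected model and returns a response dict.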
from dotenv import load_dotenv
from langchain_groq import ChatGroq
from langchain_google_genai import ChatGoogleGenerativeAI
from streamlit_feedback import streamlit_feedback
from huggingface_hub import HfApi
from datasets import load_dataset, get_dataset_config_info, Dataset
from PIL import Image
import time
import uuid
import asyncio
# The Gemini client (langchain_google_genai) uses asyncio; make sure this
# thread has an event loop, since Streamlit script threads may not provide one
try:
    asyncio.get_running_loop()
except RuntimeError:
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

# Page config with beautiful theme
st.set_page_config(
    page_title="VayuChat - AI Air Quality Assistant",
    page_icon="V",
    layout="wide",
    initial_sidebar_state="expanded"
)
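# Note: st.set_page_config must be the first Streamlit command in the script,
# which is why it runs before the CSS/JS injection below.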
# Custom CSS for beautiful styling
st.markdown("""
<style>
/* Clean app background */
.stApp {
background-color: #ffffff;
color: #212529;
font-family: 'Segoe UI', sans-serif;
}
/* Reduce main container padding */
.main .block-container {
padding-top: 0.5rem;
padding-bottom: 3rem;
max-width: 100%;
}
/* Remove excessive spacing */
.element-container {
margin-bottom: 0.5rem !important;
}
/* Fix sidebar spacing */
[data-testid="stSidebar"] .element-container {
margin-bottom: 0.25rem !important;
}
/* Sidebar */
[data-testid="stSidebar"] {
background-color: #f8f9fa;
border-right: 1px solid #dee2e6;
padding: 1rem;
}
/* Optimize sidebar scrolling */
[data-testid="stSidebar"] > div:first-child {
height: 100vh;
overflow-y: auto;
padding-bottom: 2rem;
}
[data-testid="stSidebar"]::-webkit-scrollbar {
width: 6px;
}
[data-testid="stSidebar"]::-webkit-scrollbar-track {
background: #f1f1f1;
border-radius: 3px;
}
[data-testid="stSidebar"]::-webkit-scrollbar-thumb {
background: #c1c1c1;
border-radius: 3px;
}
[data-testid="stSidebar"]::-webkit-scrollbar-thumb:hover {
background: #a1a1a1;
}
/* Main title */
.main-title {
text-align: center;
color: #343a40;
font-size: 2.5rem;
font-weight: 700;
margin-bottom: 0.5rem;
}
/* Subtitle */
.subtitle {
text-align: center;
color: #6c757d;
font-size: 1.1rem;
margin-bottom: 1.5rem;
}
/* Instructions */
.instructions {
background-color: #f1f3f5;
border-left: 4px solid #0d6efd;
padding: 1rem;
margin-bottom: 1.5rem;
border-radius: 6px;
color: #495057;
text-align: left;
}
/* Quick prompt buttons */
.quick-prompt-container {
display: flex;
flex-wrap: wrap;
gap: 8px;
margin-bottom: 1.5rem;
padding: 1rem;
background-color: #f8f9fa;
border-radius: 10px;
border: 1px solid #dee2e6;
}
.quick-prompt-btn {
background-color: #0d6efd;
color: white;
border: none;
padding: 8px 16px;
border-radius: 20px;
font-size: 0.9rem;
cursor: pointer;
transition: all 0.2s ease;
white-space: nowrap;
}
.quick-prompt-btn:hover {
background-color: #0b5ed7;
transform: translateY(-2px);
}
/* User message styling */
.user-message {
background: #3b82f6;
color: white;
padding: 0.75rem 1rem;
border-radius: 12px;
max-width: 85%;
}
.user-info {
font-size: 0.875rem;
opacity: 0.9;
margin-bottom: 3px;
}
/* Assistant message styling */
.assistant-message {
background: #f1f5f9;
color: #334155;
padding: 0.75rem 1rem;
border-radius: 12px;
max-width: 85%;
}
.assistant-info {
font-size: 0.875rem;
color: #6b7280;
margin-bottom: 5px;
}
/* Processing indicator */
.processing-indicator {
background: linear-gradient(135deg, #a8edea 0%, #fed6e3 100%);
color: #333;
padding: 1rem 1.5rem;
border-radius: 12px;
margin: 1rem 0;
margin-left: 0;
margin-right: auto;
max-width: 70%;
box-shadow: 0 2px 10px rgba(0,0,0,0.1);
animation: pulse 2s infinite;
}
@keyframes pulse {
0% { opacity: 1; }
50% { opacity: 0.7; }
100% { opacity: 1; }
}
/* Feedback box */
.feedback-section {
background-color: #f8f9fa;
border: 1px solid #dee2e6;
padding: 1rem;
border-radius: 8px;
margin: 1rem 0;
}
/* Success and error messages */
.success-message {
background-color: #d1e7dd;
color: #0f5132;
padding: 1rem;
border-radius: 6px;
border: 1px solid #badbcc;
}
.error-message {
background-color: #f8d7da;
color: #842029;
padding: 1rem;
border-radius: 6px;
border: 1px solid #f5c2c7;
}
/* Chat input styling like mockup */
.stChatInput {
border-radius: 8px;
border: 1px solid #d1d5db;
background: #ffffff;
padding: 0.75rem 1rem;
font-size: 1rem;
width: 100% !important;
max-width: none !important;
}
.stChatInput:focus {
border-color: #3b82f6;
box-shadow: 0 0 0 3px rgba(59, 130, 246, 0.1);
}
/* Button */
.stButton > button {
background-color: #0d6efd;
color: white;
border-radius: 6px;
padding: 0.5rem 1.25rem;
border: none;
font-weight: 600;
transition: background-color 0.2s ease;
}
.stButton > button:hover {
background-color: #0b5ed7;
}
/* Sidebar button styling - smaller, left-aligned */
[data-testid="stSidebar"] .stButton > button {
background-color: #f8fafc;
color: #475569;
border: 1px solid #e2e8f0;
padding: 0.375rem 0.75rem;
font-size: 0.65rem;
font-weight: normal;
text-align: left;
white-space: normal;
height: auto;
line-height: 1.2;
transition: all 0.2s ease;
cursor: pointer;
margin-bottom: 0.25rem;
width: 100%;
display: flex;
justify-content: flex-start;
}
[data-testid="stSidebar"] .stButton > button:hover {
background-color: #e0f2fe;
border-color: #0ea5e9;
color: #0c4a6e;
}
[data-testid="stSidebar"] .stButton > button:active {
transform: translateY(0);
box-shadow: none;
}
/* Code container styling */
.code-container {
margin: 1rem 0;
border: 1px solid #d1d5db;
border-radius: 12px;
background: white;
box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
}
.code-header {
display: flex;
justify-content: space-between;
align-items: center;
padding: 0.875rem 1.25rem;
background: linear-gradient(135deg, #f8fafc 0%, #f1f5f9 100%);
border-bottom: 1px solid #e2e8f0;
cursor: pointer;
transition: all 0.2s ease;
border-radius: 12px 12px 0 0;
}
.code-header:hover {
background: linear-gradient(135deg, #e2e8f0 0%, #cbd5e1 100%);
}
.code-title {
font-size: 0.9rem;
font-weight: 600;
color: #1e293b;
display: flex;
align-items: center;
gap: 0.5rem;
}
.code-title:before {
content: "⚡";
font-size: 0.8rem;
}
.toggle-text {
font-size: 0.75rem;
color: #64748b;
font-weight: 500;
}
.code-block {
background: linear-gradient(135deg, #0f172a 0%, #1e293b 100%);
color: #e2e8f0;
padding: 1.5rem;
font-family: 'SF Mono', 'Monaco', 'Menlo', 'Consolas', monospace;
font-size: 0.875rem;
overflow-x: auto;
line-height: 1.6;
border-radius: 0 0 12px 12px;
}
.answer-container {
background: #f8fafc;
border: 1px solid #e2e8f0;
border-radius: 8px;
padding: 1.5rem;
margin: 1rem 0;
}
.answer-text {
font-size: 1.125rem;
color: #1e293b;
line-height: 1.6;
margin-bottom: 1rem;
}
.answer-highlight {
background: #fef3c7;
padding: 0.125rem 0.375rem;
border-radius: 4px;
font-weight: 600;
color: #92400e;
}
.context-info {
background: #f1f5f9;
border-left: 4px solid #3b82f6;
padding: 0.75rem 1rem;
margin: 1rem 0;
font-size: 0.875rem;
color: #475569;
}
/* Hide default menu and footer */
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
header {visibility: hidden;}
/* Auto scroll */
.main-container {
height: 70vh;
overflow-y: auto;
}
</style>
""", unsafe_allow_html=True)
# JavaScript for interactions
st.markdown("""
<script>
function scrollToBottom() {
    setTimeout(function() {
        const mainContainer = document.querySelector('.main-container');
        if (mainContainer) {
            mainContainer.scrollTop = mainContainer.scrollHeight;
        }
        window.scrollTo(0, document.body.scrollHeight);
    }, 100);
}

function toggleCode(header) {
    const codeBlock = header.nextElementSibling;
    const toggleText = header.querySelector('.toggle-text');
    if (codeBlock.style.display === 'none') {
        codeBlock.style.display = 'block';
        toggleText.textContent = 'Click to collapse';
    } else {
        codeBlock.style.display = 'none';
        toggleText.textContent = 'Click to expand';
    }
}
</script>
""", unsafe_allow_html=True)
# FORCE reload environment variables
load_dotenv(override=True)
# Get API keys
Groq_Token = os.getenv("GROQ_API_KEY")
hf_token = os.getenv("HF_TOKEN")
gemini_token = os.getenv("GEMINI_TOKEN")
# Display order in the model selector follows this dict's insertion order
models = {
    "gpt-oss-120b": "openai/gpt-oss-120b",
    "qwen3-32b": "qwen/qwen3-32b",
    "gpt-oss-20b": "openai/gpt-oss-20b",
    "llama4 maverick": "meta-llama/llama-4-maverick-17b-128e-instruct",
    "llama3.3": "llama-3.3-70b-versatile",
    "deepseek-R1": "deepseek-r1-distill-llama-70b",
    "gemini-2.5-flash": "gemini-2.5-flash",
    "gemini-2.5-pro": "gemini-2.5-pro",
    "gemini-2.5-flash-lite": "gemini-2.5-flash-lite",
    "gemini-2.0-flash": "gemini-2.0-flash",
    "gemini-2.0-flash-lite": "gemini-2.0-flash-lite",
    # "llama4 scout": "meta-llama/llama-4-scout-17b-16e-instruct",
    # "llama3.1": "llama-3.1-8b-instant",
}
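# Keys are the display names shown in the model selector; values are the
# provider-side model IDs. Names containing "gemini" need GEMINI_TOKEN, all
# others go through Groq and need GROQ_API_KEY (see the filter below).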
self_path = os.path.dirname(os.path.abspath(__file__))
# Initialize a unique session ID for this browser session
if "session_id" not in st.session_state:
    st.session_state.session_id = str(uuid.uuid4())

def upload_feedback(feedback, error, output, last_prompt, code, status):
    """Upload user feedback (and any generated plot) to the VayuChat_Feedback dataset on Hugging Face."""
    try:
        if not hf_token or hf_token.strip() == "":
            st.warning("Cannot upload feedback - HF_TOKEN not available")
            return False

        # Create comprehensive feedback data
        feedback_data = {
            "timestamp": datetime.now().isoformat(),
            "session_id": st.session_state.session_id,
            "feedback_score": feedback.get("score", ""),
            "feedback_comment": feedback.get("text", ""),
            "user_prompt": last_prompt,
            "ai_output": str(output),
            "generated_code": code or "",
            "error_message": error or "",
            "is_image_output": status.get("is_image", False),
            "success": not bool(error),
        }

        # Create a unique folder name with timestamp
        timestamp_str = datetime.now().strftime("%Y%m%d_%H%M%S")
        random_id = str(uuid.uuid4())[:8]
        folder_name = f"feedback_{timestamp_str}_{random_id}"

        # Create markdown feedback file
        markdown_content = f"""# VayuChat Feedback Report

## Session Information
- **Timestamp**: {feedback_data['timestamp']}
- **Session ID**: {feedback_data['session_id']}

## User Interaction
**Prompt**: {feedback_data['user_prompt']}

## AI Response
**Output**: {feedback_data['ai_output']}

## Generated Code
```python
{feedback_data['generated_code']}
```

## Technical Details
- **Error Message**: {feedback_data['error_message']}
- **Is Image Output**: {feedback_data['is_image_output']}
- **Success**: {feedback_data['success']}

## User Feedback
- **Score**: {feedback_data['feedback_score']}
- **Comments**: {feedback_data['feedback_comment']}
"""

        # Save markdown file locally
        markdown_filename = f"{folder_name}.md"
        markdown_local_path = f"/tmp/{markdown_filename}"
        with open(markdown_local_path, "w", encoding="utf-8") as f:
            f.write(markdown_content)

        # Upload to Hugging Face
        api = HfApi(token=hf_token)

        # Upload markdown feedback
        api.upload_file(
            path_or_fileobj=markdown_local_path,
            path_in_repo=f"data/{markdown_filename}",
            repo_id="SustainabilityLabIITGN/VayuChat_Feedback",
            repo_type="dataset",
        )

        # Upload the plot as well if the output was an image file
        if status.get("is_image", False) and isinstance(output, str) and os.path.exists(output):
            try:
                image_filename = f"{folder_name}_plot.png"
                api.upload_file(
                    path_or_fileobj=output,
                    path_in_repo=f"data/{image_filename}",
                    repo_id="SustainabilityLabIITGN/VayuChat_Feedback",
                    repo_type="dataset",
                )
            except Exception as img_error:
                print(f"Error uploading image: {img_error}")

        # Clean up local files
        if os.path.exists(markdown_local_path):
            os.remove(markdown_local_path)

        st.success("Feedback uploaded successfully!")
        return True

    except Exception as e:
        st.error(f"Error uploading feedback: {e}")
        print(f"Feedback upload error: {e}")
        return False
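# NOTE: upload_feedback is defined but never called in this file; the rating
# buttons below only store feedback in st.session_state.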
# Keep only the models whose provider API key is configured
available_models = []
model_names = list(models.keys())
groq_models = []
gemini_models = []

for model_name in model_names:
    if "gemini" not in model_name:
        groq_models.append(model_name)
    else:
        gemini_models.append(model_name)

if Groq_Token and Groq_Token.strip():
    available_models.extend(groq_models)
if gemini_token and gemini_token.strip():
    available_models.extend(gemini_models)

if not available_models:
    st.error("No API keys available! Please set up your API keys in the .env file")
    st.stop()

# Set GPT-OSS-120B as the default if available, falling back to DeepSeek-R1
default_index = 0
if "gpt-oss-120b" in available_models:
    default_index = available_models.index("gpt-oss-120b")
elif "deepseek-R1" in available_models:
    default_index = available_models.index("deepseek-R1")
# Simple header - just title
st.title("VayuChat")
# Load data with caching for better performance
@st.cache_data
def load_data():
    return preprocess_and_load_df(join(self_path, "Data.csv"))

try:
    df = load_data()  # loaded silently - no success message needed
except Exception as e:
    st.error(f"Error loading data: {e}")
    st.stop()
inference_server = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
image_path = "IITGN_Logo.png"
# Clean sidebar
with st.sidebar:
    # Model selector at top of sidebar for easy access
    model_name = st.selectbox(
        "🤖 AI Model:",
        available_models,
        index=default_index,
        help="Choose your AI model - easily accessible without scrolling!"
    )

    st.markdown("---")

    # Quick Queries Section
    st.markdown("### 💭 Quick Queries")

    # Load quick prompts with caching
    @st.cache_data
    def load_questions():
        questions = []
        questions_file = join(self_path, "questions.txt")
        if os.path.exists(questions_file):
            try:
                with open(questions_file, 'r', encoding='utf-8') as f:
                    content = f.read()
                questions = [q.strip() for q in content.split("\n") if q.strip()]
            except Exception:
                questions = []
        return questions

    questions = load_questions()

    # Fall back to default prompts if the file is missing or empty
    if not questions:
        questions = [
            "Which month had highest pollution?",
            "Which city has worst air quality?",
            "Show annual PM2.5 average",
            "Plot monthly average PM2.5 for 2023",
            "List all cities by pollution level",
            "Compare winter vs summer pollution",
            "Show seasonal pollution patterns",
            "Which areas exceed WHO guidelines?",
            "What are peak pollution hours?",
            "Show PM10 vs PM2.5 comparison",
            "Which station records highest variability in PM2.5?",
            "Calculate pollution improvement rate year-over-year by city",
            "Identify cities with PM2.5 levels consistently above 50 μg/m³ for >6 months",
            "Find correlation between PM2.5 and PM10 across different seasons and cities",
            "Compare weekday vs weekend levels",
            "Plot yearly trend analysis",
            "Show pollution distribution by city",
            "Create correlation plot between pollutants",
        ]

    # Quick query buttons in sidebar
    selected_prompt = None

    # Show all questions, grouped into expandable sections
    if len(questions) > 0:
        st.markdown("**Select a question to analyze:**")

        # Getting Started section with simple questions
        getting_started_questions = questions[:10]  # first 10 simple questions
        with st.expander("🚀 Getting Started - Simple Questions", expanded=True):
            for i, q in enumerate(getting_started_questions):
                if st.button(q, key=f"start_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
                    selected_prompt = q
                    st.session_state.last_selected_prompt = q

        # Expandable sections for better organization
        with st.expander("📊 NCAP Funding & Policy Analysis", expanded=False):
            for i, q in enumerate([q for q in questions if any(word in q.lower() for word in ['ncap', 'funding', 'investment', 'rupee'])]):
                if st.button(q, key=f"ncap_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
                    selected_prompt = q
                    st.session_state.last_selected_prompt = q

        with st.expander("🌬️ Meteorology & Environmental Factors", expanded=False):
            for i, q in enumerate([q for q in questions if any(word in q.lower() for word in ['wind', 'temperature', 'humidity', 'rainfall', 'meteorological', 'monsoon', 'barometric'])]):
                if st.button(q, key=f"met_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
                    selected_prompt = q
                    st.session_state.last_selected_prompt = q

        with st.expander("👥 Population & Demographics", expanded=False):
            for i, q in enumerate([q for q in questions if any(word in q.lower() for word in ['population', 'capita', 'density', 'exposure'])]):
                if st.button(q, key=f"pop_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
                    selected_prompt = q
                    st.session_state.last_selected_prompt = q

        with st.expander("🏭 Multi-Pollutant Analysis", expanded=False):
            for i, q in enumerate([q for q in questions if any(word in q.lower() for word in ['ozone', 'no2', 'correlation', 'multi-pollutant', 'interaction'])]):
                if st.button(q, key=f"multi_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
                    selected_prompt = q
                    st.session_state.last_selected_prompt = q

        with st.expander("📈 Other Analysis Questions", expanded=False):
            remaining_questions = [q for q in questions if not any(any(word in q.lower() for word in category) for category in [
                ['ncap', 'funding', 'investment', 'rupee'],
                ['wind', 'temperature', 'humidity', 'rainfall', 'meteorological', 'monsoon', 'barometric'],
                ['population', 'capita', 'density', 'exposure'],
                ['ozone', 'no2', 'correlation', 'multi-pollutant', 'interaction'],
            ])]
            for i, q in enumerate(remaining_questions):
                if st.button(q, key=f"other_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
                    selected_prompt = q
                    st.session_state.last_selected_prompt = q

    st.markdown("---")

    # Clear Chat button
    if st.button("Clear Chat", use_container_width=True):
        st.session_state.responses = []
        st.session_state.processing = False
        st.session_state.session_id = str(uuid.uuid4())
        try:
            st.rerun()
        except AttributeError:
            # Fallback for older Streamlit versions
            st.experimental_rerun()

# Initialize session state first
if "responses" not in st.session_state:
    st.session_state.responses = []
if "processing" not in st.session_state:
    st.session_state.processing = False
if "session_id" not in st.session_state:
    st.session_state.session_id = str(uuid.uuid4())
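# responses holds the chat transcript, processing marks a model call in
# flight across reruns, and session_id groups feedback logs per session.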

def show_custom_response(response):
    """Render a single chat message and report whether it displayed an image."""
    role = response.get("role", "assistant")
    content = response.get("content", "")

    if role == "user":
        # User message, right-aligned
        st.markdown(f"""
<div style='display: flex; justify-content: flex-end; margin: 1rem 0;'>
<div class='user-message'>
{content}
</div>
</div>
""", unsafe_allow_html=True)

    elif role == "assistant":
        # If content is an image filename, don't display the filename as text
        is_image_path = isinstance(content, str) and any(ext in content for ext in ['.png', '.jpg', '.jpeg'])
        # Content may also be a pandas DataFrame (pd is imported at module level)
        is_dataframe = isinstance(content, pd.DataFrame)

        # Check for errors first and display them with special styling
        error = response.get("error")
        timestamp = response.get("timestamp", "")
        timestamp_display = f" • {timestamp}" if timestamp else ""

        if error:
            st.markdown(f"""
<div style='display: flex; justify-content: flex-start; margin: 1rem 0;'>
<div class='assistant-message'>
<div class='assistant-info'>VayuChat{timestamp_display}</div>
<div class='error-message'>
⚠️ <strong>Error:</strong> {error}
<br><br>
<em>💡 Try rephrasing your question or being more specific about what you'd like to analyze.</em>
</div>
</div>
</div>
""", unsafe_allow_html=True)
        elif not is_image_path and not is_dataframe:
            # Plain text answer, left-aligned
            st.markdown(f"""
<div style='display: flex; justify-content: flex-start; margin: 1rem 0;'>
<div class='assistant-message'>
<div class='assistant-info'>VayuChat{timestamp_display}</div>
{content if isinstance(content, str) else str(content)}
</div>
</div>
""", unsafe_allow_html=True)
        elif is_dataframe:
            # Display DataFrame with nice formatting
            st.markdown(f"""
<div style='display: flex; justify-content: flex-start; margin: 1rem 0;'>
<div class='assistant-message'>
<div class='assistant-info'>VayuChat{timestamp_display}</div>
Here are the results:
</div>
</div>
""", unsafe_allow_html=True)
            # Add context info for dataframes
            st.markdown("""
<div class='context-info'>
💡 This table is interactive - click column headers to sort, or scroll to view all data.
</div>
""", unsafe_allow_html=True)
            st.dataframe(content, use_container_width=True)

        # Show generated code in a Streamlit expander
        if response.get("gen_code"):
            with st.expander("📋 View Generated Code", expanded=False):
                st.code(response["gen_code"], language="python")

        # Try to display an image if content is (or contains) a file path
        try:
            if isinstance(content, str) and (content.endswith('.png') or content.endswith('.jpg')):
                if os.path.exists(content):
                    # Display the image without showing the filename
                    st.image(content, use_column_width=True)
                    return {"is_image": True}
            elif isinstance(content, str) and any(ext in content for ext in ['.png', '.jpg']):
                # Extract a potential filename embedded in the content
                import re
                filename_match = re.search(r'([^/\\]+\.(?:png|jpg|jpeg))', content)
                if filename_match:
                    filename = filename_match.group(1)
                    if os.path.exists(filename):
                        st.image(filename, use_column_width=True)
                        return {"is_image": True}
        except Exception:
            pass

    return {"is_image": False}
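# The {"is_image": bool} return value is consumed as `status` by the history
# loop below, and matches the `status` dict that upload_feedback expects.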
# Display chat history
for response_id, response in enumerate(st.session_state.responses):
    status = show_custom_response(response)

    # Show feedback and action buttons for assistant responses
    if response["role"] == "assistant":
        feedback_key = f"feedback_{int(response_id / 2)}"
        error = response.get("error", "")
        output = response.get("content", "")
        last_prompt = response.get("last_prompt", "")
        code = response.get("gen_code", "")

        if "feedback" in st.session_state.responses[response_id]:
            feedback_data = st.session_state.responses[response_id]["feedback"]
            st.markdown(f"""
<div class='feedback-section'>
<strong>Your Feedback:</strong> {feedback_data.get('score', '')}
{f"- {feedback_data.get('text', '')}" if feedback_data.get('text') else ""}
</div>
""", unsafe_allow_html=True)
        else:
            # Simple thumbs up/down feedback
            st.markdown("**Rate this response:**")
            col1, col2 = st.columns(2)
            with col1:
                good = st.button("👍 Good", key=f"{feedback_key}_good")
            with col2:
                poor = st.button("👎 Needs work", key=f"{feedback_key}_poor")
            if good or poor:
                thumbs = "👍 Good" if good else "👎 Needs work"
                # Note: this text box only renders on the click's rerun, so the
                # stored comment stays empty unless typed before clicking
                comments = st.text_input("Optional comment:", key=f"{feedback_key}_comments")
                feedback = {"score": thumbs, "text": comments}
                st.session_state.responses[response_id]["feedback"] = feedback
                st.success("Thanks for your feedback!")
                st.rerun()

        # Retry / follow-up actions for each assistant response
        col1, col2, col3 = st.columns([1, 1, 2])
        with col1:
            if st.button("🔄 Retry", key=f"retry_{response_id}", help="Regenerate this response with current model"):
                # Re-run the user prompt that led to this response
                if response_id > 0:
                    retry_prompt = st.session_state.responses[response_id - 1].get("content", "")
                    if retry_prompt:
                        # Remove both the user message and the assistant response
                        del st.session_state.responses[response_id]
                        del st.session_state.responses[response_id - 1]
                        # Queue the prompt so it is re-asked on the next run
                        st.session_state.follow_up_prompt = retry_prompt
                        st.rerun()
        with col2:
            if st.button("💬 Follow-up", key=f"followup_{response_id}", help="Ask a follow-up question"):
                st.session_state.follow_up_mode = True
                st.rerun()
# Chat input with better guidance
prompt = st.chat_input("💬 Ask about air quality trends, compare cities, or request visualizations...", key="main_chat")

# Handle selected prompt from quick prompts
if selected_prompt:
    prompt = selected_prompt

# Handle follow-up prompts from quick action buttons
if st.session_state.get("follow_up_prompt") and not st.session_state.get("processing"):
    prompt = st.session_state.follow_up_prompt
    st.session_state.follow_up_prompt = None  # clear the follow-up prompt

# Handle new queries
if prompt and not st.session_state.get("processing"):
    # Prevent duplicate processing of the same prompt with the same model
    if "last_prompt" in st.session_state:
        last_prompt = st.session_state["last_prompt"]
        last_model_name = st.session_state.get("last_model_name", "")
        if (prompt == last_prompt) and (model_name == last_model_name):
            prompt = None

    if prompt:
        # Add user input to chat history
        user_response = get_from_user(prompt)
        st.session_state.responses.append(user_response)

        # Set processing state
        st.session_state.processing = True
        st.session_state.current_model = model_name
        st.session_state.current_question = prompt

        # Rerun to show processing indicator
        st.rerun()
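# Two-phase flow: the st.rerun() above first repaints the page with the new
# user message and processing indicator; the block below then runs the model
# call and reruns again with the finished response.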
# Process the question if we're in processing state
if st.session_state.get("processing"):
    # Animated processing indicator showing the active model
    st.markdown("""
<div style='padding: 1rem; text-align: center; background: #f8fafc; border-radius: 8px; margin: 1rem 0;'>
<div style='display: flex; align-items: center; justify-content: center; gap: 0.5rem; color: #475569;'>
<div style='font-weight: 500;'>🤖 Processing with """ + str(st.session_state.get('current_model', 'Unknown')) + """</div>
<div class='dots' style='display: inline-flex; gap: 2px;'>
<div class='dot' style='width: 4px; height: 4px; background: #3b82f6; border-radius: 50%; animation: bounce 1.4s infinite ease-in-out;'></div>
<div class='dot' style='width: 4px; height: 4px; background: #3b82f6; border-radius: 50%; animation: bounce 1.4s infinite ease-in-out; animation-delay: 0.16s;'></div>
<div class='dot' style='width: 4px; height: 4px; background: #3b82f6; border-radius: 50%; animation: bounce 1.4s infinite ease-in-out; animation-delay: 0.32s;'></div>
</div>
</div>
<div style='font-size: 0.75rem; color: #6b7280; margin-top: 0.25rem;'>Analyzing data and generating response...</div>
</div>
<style>
@keyframes bounce {
0%, 80%, 100% { transform: scale(0.8); opacity: 0.5; }
40% { transform: scale(1.2); opacity: 1; }
}
</style>
""", unsafe_allow_html=True)

    prompt = st.session_state.get("current_question")
    model_name = st.session_state.get("current_model")

    try:
        response = ask_question(model_name=model_name, question=prompt)

        # Guard against malformed responses and fill in any missing fields
        if not isinstance(response, dict):
            response = {
                "role": "assistant",
                "content": "Error: Invalid response format",
                "gen_code": "",
                "ex_code": "",
                "last_prompt": prompt,
                "error": "Invalid response format",
                "timestamp": datetime.now().strftime("%H:%M"),
            }
        response.setdefault("role", "assistant")
        response.setdefault("content", "No content generated")
        response.setdefault("gen_code", "")
        response.setdefault("ex_code", "")
        response.setdefault("last_prompt", prompt)
        response.setdefault("error", None)
        response.setdefault("timestamp", datetime.now().strftime("%H:%M"))
    except Exception as e:
        response = {
            "role": "assistant",
            "content": f"Sorry, I encountered an error: {str(e)}",
            "gen_code": "",
            "ex_code": "",
            "last_prompt": prompt,
            "error": str(e),
            "timestamp": datetime.now().strftime("%H:%M"),
        }

    st.session_state.responses.append(response)
    st.session_state["last_prompt"] = prompt
    st.session_state["last_model_name"] = model_name
    st.session_state.processing = False

    # Clear processing state
    if "current_model" in st.session_state:
        del st.session_state.current_model
    if "current_question" in st.session_state:
        del st.session_state.current_question

    st.rerun()
# Close chat container
st.markdown("</div>", unsafe_allow_html=True)

# Minimal auto-scroll - only scroll while processing
if st.session_state.get("processing"):
    st.markdown("<script>scrollToBottom();</script>", unsafe_allow_html=True)
# Beautiful sidebar footer
# with st.sidebar:
# st.markdown("---")
# st.markdown("""
# <div class='contact-section'>
# <h4>📄 Paper on VayuChat</h4>
# <p>Learn more about VayuChat in our <a href='https://arxiv.org/abs/2411.12760' target='_blank'>Research Paper</a>.</p>
# </div>
# """, unsafe_allow_html=True)
# Dataset Info Section (matching mockup)
st.markdown("### Dataset Info")
st.markdown("""
<div style='background: #f1f5f9; border-radius: 8px; padding: 1rem; margin-bottom: 1rem;'>
<h4 style='margin: 0 0 0.5rem 0; color: #1e293b; font-size: 0.9rem;'>PM2.5 Air Quality Data</h4>
<p style='margin: 0; font-size: 0.75rem; color: #475569;'><strong>Time Range:</strong> 2022 - 2023</p>
<p style='margin: 0; font-size: 0.75rem; color: #475569;'><strong>Locations:</strong> 300+ cities across India</p>
<p style='margin: 0; font-size: 0.75rem; color: #475569;'><strong>Records:</strong> 100,000+ measurements</p>
</div>
""", unsafe_allow_html=True)