import streamlit as st
import os
import json
import pandas as pd
import random
from datetime import datetime
from os.path import join
from src import (
    preprocess_and_load_df,
    get_from_user,
    ask_question,
)
from dotenv import load_dotenv
from langchain_groq import ChatGroq
from langchain_google_genai import ChatGoogleGenerativeAI
from streamlit_feedback import streamlit_feedback
from huggingface_hub import HfApi
from datasets import load_dataset, get_dataset_config_info, Dataset
from PIL import Image
import time
import uuid
import asyncio

# The Gemini client needs a running asyncio event loop; create one if none exists
try:
    asyncio.get_running_loop()
except RuntimeError:
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

# Page config
st.set_page_config(
    page_title="VayuChat - AI Air Quality Assistant",
    page_icon="V",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Custom CSS for styling
st.markdown("""
""", unsafe_allow_html=True)

# JavaScript for interactions
st.markdown("""
""", unsafe_allow_html=True)

# Force-reload environment variables so key changes take effect without restarting the app
load_dotenv(override=True)

# API keys
Groq_Token = os.getenv("GROQ_API_KEY")
hf_token = os.getenv("HF_TOKEN")
gemini_token = os.getenv("GEMINI_TOKEN")
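# Example .env file (illustrative values; the variable names are the ones read above,
# and at least one of GROQ_API_KEY or GEMINI_TOKEN must be set for the app to start):
#   GROQ_API_KEY=gsk_xxxxxxxxxxxxxxxx
#   GEMINI_TOKEN=your-google-generative-ai-key
#   HF_TOKEN=hf_xxxxxxxxxxxxxxxx        # only needed for uploading feedback
# Data.csv and questions.txt are expected next to this script; run with `streamlit run <this file>`.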
# Model display name -> provider model ID (dictionary order decides the order in the selector)
models = {
    "gpt-oss-120b": "openai/gpt-oss-120b",
    "qwen3-32b": "qwen/qwen3-32b",
    "gpt-oss-20b": "openai/gpt-oss-20b",
    "llama4 maverick": "meta-llama/llama-4-maverick-17b-128e-instruct",
    "llama3.3": "llama-3.3-70b-versatile",
    "deepseek-R1": "deepseek-r1-distill-llama-70b",
    "gemini-2.5-flash": "gemini-2.5-flash",
    "gemini-2.5-pro": "gemini-2.5-pro",
    "gemini-2.5-flash-lite": "gemini-2.5-flash-lite",
    "gemini-2.0-flash": "gemini-2.0-flash",
    "gemini-2.0-flash-lite": "gemini-2.0-flash-lite",
    # "llama4 scout": "meta-llama/llama-4-scout-17b-16e-instruct",
    # "llama3.1": "llama-3.1-8b-instant",
}

self_path = os.path.dirname(os.path.abspath(__file__))

# Initialize a unique session ID for this session
if "session_id" not in st.session_state:
    st.session_state.session_id = str(uuid.uuid4())


def upload_feedback(feedback, error, output, last_prompt, code, status):
    """Upload user feedback (and any generated plot) to the Hugging Face feedback dataset."""
    try:
        if not hf_token or hf_token.strip() == "":
            st.warning("Cannot upload feedback - HF_TOKEN not available")
            return False

        # Collect everything we know about this interaction
        feedback_data = {
            "timestamp": datetime.now().isoformat(),
            "session_id": st.session_state.session_id,
            "feedback_score": feedback.get("score", ""),
            "feedback_comment": feedback.get("text", ""),
            "user_prompt": last_prompt,
            "ai_output": str(output),
            "generated_code": code or "",
            "error_message": error or "",
            "is_image_output": status.get("is_image", False),
            "success": not bool(error)
        }

        # Unique folder name built from a timestamp and a short random ID
        timestamp_str = datetime.now().strftime("%Y%m%d_%H%M%S")
        random_id = str(uuid.uuid4())[:8]
        folder_name = f"feedback_{timestamp_str}_{random_id}"

        # Markdown feedback report
        markdown_content = f"""# VayuChat Feedback Report

## Session Information
- **Timestamp**: {feedback_data['timestamp']}
- **Session ID**: {feedback_data['session_id']}

## User Interaction
**Prompt**: {feedback_data['user_prompt']}

## AI Response
**Output**: {feedback_data['ai_output']}

## Generated Code
```python
{feedback_data['generated_code']}
```

## Technical Details
- **Error Message**: {feedback_data['error_message']}
- **Is Image Output**: {feedback_data['is_image_output']}
- **Success**: {feedback_data['success']}

## User Feedback
- **Score**: {feedback_data['feedback_score']}
- **Comments**:
{feedback_data['feedback_comment']}
"""

        # Save the markdown file locally
        markdown_filename = f"{folder_name}.md"
        markdown_local_path = f"/tmp/{markdown_filename}"
        with open(markdown_local_path, "w", encoding="utf-8") as f:
            f.write(markdown_content)

        # Upload to Hugging Face
        api = HfApi(token=hf_token)
        api.upload_file(
            path_or_fileobj=markdown_local_path,
            path_in_repo=f"data/{markdown_filename}",
            repo_id="SustainabilityLabIITGN/VayuChat_Feedback",
            repo_type="dataset",
        )

        # Upload the plot image if the output was an image file
        if status.get("is_image", False) and isinstance(output, str) and os.path.exists(output):
            try:
                image_filename = f"{folder_name}_plot.png"
                api.upload_file(
                    path_or_fileobj=output,
                    path_in_repo=f"data/{image_filename}",
                    repo_id="SustainabilityLabIITGN/VayuChat_Feedback",
                    repo_type="dataset",
                )
            except Exception as img_error:
                print(f"Error uploading image: {img_error}")

        # Clean up local files
        if os.path.exists(markdown_local_path):
            os.remove(markdown_local_path)

        st.success("Feedback uploaded successfully!")
        return True

    except Exception as e:
        st.error(f"Error uploading feedback: {e}")
        print(f"Feedback upload error: {e}")
        return False


# Filter models by the API keys that are actually available
available_models = []
model_names = list(models.keys())
groq_models = []
gemini_models = []

for model_name in model_names:
    if "gemini" not in model_name:
        groq_models.append(model_name)
    else:
        gemini_models.append(model_name)

if Groq_Token and Groq_Token.strip():
    available_models.extend(groq_models)
if gemini_token and gemini_token.strip():
    available_models.extend(gemini_models)

if not available_models:
    st.error("No API keys available! Please set up your API keys in the .env file")
    st.stop()

# Default to gpt-oss-120b if available, otherwise deepseek-R1
default_index = 0
if "gpt-oss-120b" in available_models:
    default_index = available_models.index("gpt-oss-120b")
elif "deepseek-R1" in available_models:
    default_index = available_models.index("deepseek-R1")

# Simple header - just the title
st.title("VayuChat")

# Load data with caching for better performance
@st.cache_data
def load_data():
    return preprocess_and_load_df(join(self_path, "Data.csv"))

try:
    df = load_data()  # Loaded silently - no success message needed
except Exception as e:
    st.error(f"Error loading data: {e}")
    st.stop()

inference_server = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
image_path = "IITGN_Logo.png"

# Sidebar
with st.sidebar:
    # Model selector at the top of the sidebar for easy access
    model_name = st.selectbox(
        "🤖 AI Model:",
        available_models,
        index=default_index,
        help="Choose your AI model - easily accessible without scrolling!"
    )

    st.markdown("---")

    # Quick Queries section
    st.markdown("### 💭 Quick Queries")

    # Load quick prompts with caching
    @st.cache_data
    def load_questions():
        questions = []
        questions_file = join(self_path, "questions.txt")
        if os.path.exists(questions_file):
            try:
                with open(questions_file, 'r', encoding='utf-8') as f:
                    content = f.read()
                questions = [q.strip() for q in content.split("\n") if q.strip()]
            except Exception:
                questions = []
        return questions

    questions = load_questions()

    # Fall back to default prompts if the file is missing or empty
    if not questions:
        questions = [
            "Which month had highest pollution?",
            "Which city has worst air quality?",
            "Show annual PM2.5 average",
            "Plot monthly average PM2.5 for 2023",
            "List all cities by pollution level",
            "Compare winter vs summer pollution",
            "Show seasonal pollution patterns",
            "Which areas exceed WHO guidelines?",
            "What are peak pollution hours?",
            "Show PM10 vs PM2.5 comparison",
            "Which station records highest variability in PM2.5?",
            "Calculate pollution improvement rate year-over-year by city",
            "Identify cities with PM2.5 levels consistently above 50 μg/m³ for >6 months",
            "Find correlation between PM2.5 and PM10 across different seasons and cities",
            "Compare weekday vs weekend levels",
            "Plot yearly trend analysis",
            "Show pollution distribution by city",
            "Create correlation plot between pollutants"
        ]

    # Quick query buttons in the sidebar
    selected_prompt = None

    # Show all questions, grouped into expandable sections
    if len(questions) > 0:
        st.markdown("**Select a question to analyze:**")

        # Getting Started section with the first 10 (simple) questions
        getting_started_questions = questions[:10]
        with st.expander("🚀 Getting Started - Simple Questions", expanded=True):
            for i, q in enumerate(getting_started_questions):
                if st.button(q, key=f"start_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
                    selected_prompt = q
                    st.session_state.last_selected_prompt = q

        # Themed sections for better organization
        with st.expander("📊 NCAP Funding & Policy Analysis", expanded=False):
            for i, q in enumerate([q for q in questions if any(word in q.lower() for word in ['ncap', 'funding', 'investment', 'rupee'])]):
                if st.button(q, key=f"ncap_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
                    selected_prompt = q
                    st.session_state.last_selected_prompt = q

        with st.expander("🌬️ Meteorology & Environmental Factors", expanded=False):
            for i, q in enumerate([q for q in questions if any(word in q.lower() for word in ['wind', 'temperature', 'humidity', 'rainfall', 'meteorological', 'monsoon', 'barometric'])]):
                if st.button(q, key=f"met_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
                    selected_prompt = q
                    st.session_state.last_selected_prompt = q

        with st.expander("👥 Population & Demographics", expanded=False):
            for i, q in enumerate([q for q in questions if any(word in q.lower() for word in ['population', 'capita', 'density', 'exposure'])]):
                if st.button(q, key=f"pop_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
                    selected_prompt = q
                    st.session_state.last_selected_prompt = q

        with st.expander("🏭 Multi-Pollutant Analysis", expanded=False):
            for i, q in enumerate([q for q in questions if any(word in q.lower() for word in ['ozone', 'no2', 'correlation', 'multi-pollutant', 'interaction'])]):
                if st.button(q, key=f"multi_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
                    selected_prompt = q
                    st.session_state.last_selected_prompt = q

        with st.expander("📈 Other Analysis Questions", expanded=False):
            remaining_questions = [q for q in questions if not any(
                any(word in q.lower() for word in category) for category in [
                    ['ncap', 'funding', 'investment', 'rupee'],
                    ['wind', 'temperature', 'humidity', 'rainfall', 'meteorological', 'monsoon', 'barometric'],
                    ['population', 'capita', 'density', 'exposure'],
                    ['ozone', 'no2', 'correlation', 'multi-pollutant', 'interaction']
                ]
            )]
            for i, q in enumerate(remaining_questions):
                if st.button(q, key=f"other_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
                    selected_prompt = q
                    st.session_state.last_selected_prompt = q

    st.markdown("---")

    # Clear Chat button
    if st.button("Clear Chat", use_container_width=True):
        st.session_state.responses = []
        st.session_state.processing = False
        st.session_state.session_id = str(uuid.uuid4())
        try:
            st.rerun()
        except AttributeError:
            st.experimental_rerun()

# Initialize session state
if "responses" not in st.session_state:
    st.session_state.responses = []
if "processing" not in st.session_state:
    st.session_state.processing = False
if "session_id" not in st.session_state:
    st.session_state.session_id = str(uuid.uuid4())
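# For reference, each chat entry handled below is a plain dict. A minimal sketch of the
# expected shape (field names taken from the setdefault() calls further down; the values
# here are illustrative only):
#   {
#       "role": "assistant",
#       "content": "answer text, a DataFrame, or the path to a saved plot",
#       "gen_code": "Python code generated by the model",
#       "ex_code": "code that was executed",
#       "last_prompt": "the user question",
#       "error": None,
#       "timestamp": "14:05",
#   }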
def show_custom_response(response):
    """Render a single chat message (user or assistant) with custom styling."""
    role = response.get("role", "assistant")
    content = response.get("content", "")

    if role == "user":
        # User message with right alignment - reduced margins
        st.markdown(f"""
{content}
        """, unsafe_allow_html=True)

    elif role == "assistant":
        # Content that is an image file path is rendered as an image, not as the filename text
        is_image_path = isinstance(content, str) and any(ext in content for ext in ['.png', '.jpg', '.jpeg'])

        # Content may also be a pandas DataFrame (pandas is already imported at module level as pd)
        is_dataframe = isinstance(content, pd.DataFrame)

        # Check for errors first and display them with special styling
        error = response.get("error")
        timestamp = response.get("timestamp", "")
        timestamp_display = f" • {timestamp}" if timestamp else ""

        if error:
            st.markdown(f"""
VayuChat{timestamp_display}
⚠️ Error: {error}

💡 Try rephrasing your question or being more specific about what you'd like to analyze.
            """, unsafe_allow_html=True)

        elif not is_image_path and not is_dataframe:
            # Assistant message with left alignment - reduced margins
            st.markdown(f"""
VayuChat{timestamp_display}
{content if isinstance(content, str) else str(content)}
            """, unsafe_allow_html=True)

        elif is_dataframe:
            # Display DataFrame with nice formatting
            st.markdown(f"""
VayuChat{timestamp_display}
Here are the results:
            """, unsafe_allow_html=True)

            # Add context info for dataframes
            st.markdown("""
💡 This table is interactive - click column headers to sort, or scroll to view all data.
            """, unsafe_allow_html=True)
            st.dataframe(content, use_container_width=True)

    # Show the generated code in a Streamlit expander
    if response.get("gen_code"):
        with st.expander("📋 View Generated Code", expanded=False):
            st.code(response["gen_code"], language="python")

    # Try to display an image if the content is (or mentions) a file path
    try:
        if isinstance(content, str) and (content.endswith('.png') or content.endswith('.jpg')):
            if os.path.exists(content):
                # Display the image without showing the filename
                st.image(content, use_column_width=True)
                return {"is_image": True}
        # Also handle the case where the content contains a filename inside other text
        elif isinstance(content, str) and any(ext in content for ext in ['.png', '.jpg']):
            # Extract a potential filename from the content
            import re
            filename_match = re.search(r'([^/\\]+\.(?:png|jpg|jpeg))', content)
            if filename_match:
                filename = filename_match.group(1)
                if os.path.exists(filename):
                    st.image(filename, use_column_width=True)
                    return {"is_image": True}
    except Exception:
        pass

    return {"is_image": False}


# Display chat history
for response_id, response in enumerate(st.session_state.responses):
    status = show_custom_response(response)

    # Show the feedback section for assistant responses
    if response["role"] == "assistant":
        feedback_key = f"feedback_{int(response_id/2)}"
        error = response.get("error", "")
        output = response.get("content", "")
        last_prompt = response.get("last_prompt", "")
        code = response.get("gen_code", "")

        if "feedback" in st.session_state.responses[response_id]:
            feedback_data = st.session_state.responses[response_id]["feedback"]
            st.markdown(f"""
Your Feedback: {feedback_data.get('score', '')} {f"- {feedback_data.get('text', '')}" if feedback_data.get('text') else ""}
            """, unsafe_allow_html=True)
        else:
            # Simple thumbs-up / thumbs-down feedback
            st.markdown("**Rate this response:**")
            col1, col2 = st.columns(2)
            with col1:
                good = st.button("👍 Good", key=f"{feedback_key}_good")
            with col2:
                poor = st.button("👎 Needs work", key=f"{feedback_key}_poor")

            if good or poor:
                thumbs = "👍 Good" if good else "👎 Needs work"
                comments = st.text_input("Optional comment:", key=f"{feedback_key}_comments")
                feedback = {"score": thumbs, "text": comments}
                st.session_state.responses[response_id]["feedback"] = feedback
                st.success("Thanks for your feedback!")
                st.rerun()

        # Retry and follow-up buttons for each assistant response
        col1, col2, col3 = st.columns([1, 1, 2])
        with col1:
            if st.button("🔄 Retry", key=f"retry_{response_id}", help="Regenerate this response with current model"):
                # Get the user prompt that led to this response
                user_prompt = ""
                if response_id > 0:
                    user_prompt = st.session_state.responses[response_id - 1].get("content", "")

                if user_prompt:
                    # Remove both the user message and this assistant response,
                    # then re-submit the prompt to trigger a fresh answer
                    retry_prompt = st.session_state.responses[response_id - 1].get("content", "")
                    del st.session_state.responses[response_id]
                    del st.session_state.responses[response_id - 1]
                    st.session_state.follow_up_prompt = retry_prompt
                    st.rerun()
        with col2:
            if st.button("💬 Follow-up", key=f"followup_{response_id}", help="Ask a follow-up question"):
                st.session_state.follow_up_mode = True
                st.rerun()

# Chat input
prompt = st.chat_input("💬 Ask about air quality trends, compare cities, or request visualizations...", key="main_chat")

# Handle a prompt selected from the quick queries in the sidebar
if selected_prompt:
    prompt = selected_prompt

# Handle follow-up prompts from the quick action buttons
if st.session_state.get("follow_up_prompt") and not st.session_state.get("processing"):
    prompt = st.session_state.follow_up_prompt
    st.session_state.follow_up_prompt = None  # Clear the follow-up prompt

# Handle new queries
if prompt and not st.session_state.get("processing"):
    # Skip duplicate submissions of the same prompt with the same model
    if "last_prompt" in st.session_state:
        last_prompt = st.session_state["last_prompt"]
        last_model_name = st.session_state.get("last_model_name", "")
        if (prompt == last_prompt) and (model_name == last_model_name):
            prompt = None

    if prompt:
        # Add the user input to the chat history
        user_response = get_from_user(prompt)
        st.session_state.responses.append(user_response)

        # Set processing state
        st.session_state.processing = True
        st.session_state.current_model = model_name
        st.session_state.current_question = prompt

        # Rerun to show the processing indicator
        st.rerun()

# Process the question if we are in the processing state
if st.session_state.get("processing"):
    # Processing indicator
    st.markdown("""
🤖 Processing with """ + str(st.session_state.get('current_model', 'Unknown')) + """
Analyzing data and generating response...
    """, unsafe_allow_html=True)

    prompt = st.session_state.get("current_question")
    model_name = st.session_state.get("current_model")

    try:
        response = ask_question(model_name=model_name, question=prompt)

        if not isinstance(response, dict):
            response = {
                "role": "assistant",
                "content": "Error: Invalid response format",
                "gen_code": "",
                "ex_code": "",
                "last_prompt": prompt,
                "error": "Invalid response format",
                "timestamp": datetime.now().strftime("%H:%M")
            }

        # Make sure every expected field is present
        response.setdefault("role", "assistant")
        response.setdefault("content", "No content generated")
        response.setdefault("gen_code", "")
        response.setdefault("ex_code", "")
        response.setdefault("last_prompt", prompt)
        response.setdefault("error", None)
        response.setdefault("timestamp", datetime.now().strftime("%H:%M"))

    except Exception as e:
        response = {
            "role": "assistant",
            "content": f"Sorry, I encountered an error: {str(e)}",
            "gen_code": "",
            "ex_code": "",
            "last_prompt": prompt,
            "error": str(e),
            "timestamp": datetime.now().strftime("%H:%M")
        }

    st.session_state.responses.append(response)
    st.session_state["last_prompt"] = prompt
    st.session_state["last_model_name"] = model_name
    st.session_state.processing = False

    # Clear processing state
    if "current_model" in st.session_state:
        del st.session_state.current_model
    if "current_question" in st.session_state:
        del st.session_state.current_question

    st.rerun()

# Close chat container
st.markdown("", unsafe_allow_html=True)

# Minimal auto-scroll - only when processing
if st.session_state.get("processing"):
    st.markdown("", unsafe_allow_html=True)
# Sidebar footer (currently disabled)
# with st.sidebar:
#     st.markdown("---")
#     st.markdown("""
#     📄 Paper on VayuChat
#     Learn more about VayuChat in our Research Paper.
#     """, unsafe_allow_html=True)

# Dataset Info section (matching mockup)
st.markdown("### Dataset Info")
st.markdown("""

**PM2.5 Air Quality Data**

- **Time Range:** 2022 - 2023
- **Locations:** 300+ cities across India
- **Records:** 100,000+ measurements
""", unsafe_allow_html=True)