AbhayVG committed
Commit 983aeb4 · verified · 1 Parent(s): 62f5efd

Update app.py

Files changed (1)
  1. app.py +756 -672
app.py CHANGED
@@ -1,673 +1,757 @@
- import streamlit as st
- import os
- import json
- import pandas as pd
- import random
- from os.path import join
- from datetime import datetime
- from src import (
-     preprocess_and_load_df,
-     load_agent,
-     ask_agent,
-     decorate_with_code,
-     show_response,
-     get_from_user,
-     load_smart_df,
-     ask_question,
- )
- from dotenv import load_dotenv
- from langchain_groq import ChatGroq
- from langchain_google_genai import ChatGoogleGenerativeAI
- from streamlit_feedback import streamlit_feedback
- from huggingface_hub import HfApi
- from datasets import load_dataset, get_dataset_config_info, Dataset
- from PIL import Image
- import time
-
- # Page config with beautiful theme
- st.set_page_config(
-     page_title="VayuBuddy - AI Air Quality Assistant",
-     page_icon="🌬️",
-     layout="wide",
-     initial_sidebar_state="expanded"
- )
-
- # Custom CSS for beautiful styling
- st.markdown("""
- <style>
- /* Clean app background */
- .stApp {
-     background-color: #ffffff;
-     color: #212529;
-     font-family: 'Segoe UI', sans-serif;
- }
-
- /* Sidebar */
- [data-testid="stSidebar"] {
-     background-color: #f8f9fa;
-     border-right: 1px solid #dee2e6;
-     padding: 1rem;
- }
-
- /* Main title */
- .main-title {
-     text-align: center;
-     color: #343a40;
-     font-size: 2.5rem;
-     font-weight: 700;
-     margin-bottom: 0.5rem;
- }
-
- /* Subtitle */
- .subtitle {
-     text-align: center;
-     color: #6c757d;
-     font-size: 1.1rem;
-     margin-bottom: 1.5rem;
- }
-
- /* Instructions */
- .instructions {
-     background-color: #f1f3f5;
-     border-left: 4px solid #0d6efd;
-     padding: 1rem;
-     margin-bottom: 1.5rem;
-     border-radius: 6px;
-     color: #495057;
-     text-align: left;
- }
-
- /* Quick prompt buttons */
- .quick-prompt-container {
-     display: flex;
-     flex-wrap: wrap;
-     gap: 8px;
-     margin-bottom: 1.5rem;
-     padding: 1rem;
-     background-color: #f8f9fa;
-     border-radius: 10px;
-     border: 1px solid #dee2e6;
- }
-
- .quick-prompt-btn {
-     background-color: #0d6efd;
-     color: white;
-     border: none;
-     padding: 8px 16px;
-     border-radius: 20px;
-     font-size: 0.9rem;
-     cursor: pointer;
-     transition: all 0.2s ease;
-     white-space: nowrap;
- }
-
- .quick-prompt-btn:hover {
-     background-color: #0b5ed7;
-     transform: translateY(-2px);
- }
-
- /* User message styling */
- .user-message {
-     background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
-     color: white;
-     padding: 15px 20px;
-     border-radius: 20px 20px 5px 20px;
-     margin: 10px 0;
-     margin-left: auto;
-     margin-right: 0;
-     max-width: 80%;
-     position: relative;
-     box-shadow: 0 2px 10px rgba(0,0,0,0.1);
- }
-
- .user-info {
-     font-size: 0.8rem;
-     opacity: 0.8;
-     margin-bottom: 5px;
-     text-align: right;
- }
-
- /* Assistant message styling */
- .assistant-message {
-     background: linear-gradient(135deg, #f093fb 0%, #f5576c 100%);
-     color: white;
-     padding: 15px 20px;
-     border-radius: 20px 20px 20px 5px;
-     margin: 10px 0;
-     margin-left: 0;
-     margin-right: auto;
-     max-width: 80%;
-     position: relative;
-     box-shadow: 0 2px 10px rgba(0,0,0,0.1);
- }
-
- .assistant-info {
-     font-size: 0.8rem;
-     opacity: 0.8;
-     margin-bottom: 5px;
- }
-
- /* Processing indicator */
- .processing-indicator {
-     background: linear-gradient(135deg, #a8edea 0%, #fed6e3 100%);
-     color: #333;
-     padding: 15px 20px;
-     border-radius: 20px 20px 20px 5px;
-     margin: 10px 0;
-     margin-left: 0;
-     margin-right: auto;
-     max-width: 80%;
-     position: relative;
-     box-shadow: 0 2px 10px rgba(0,0,0,0.1);
-     animation: pulse 2s infinite;
- }
-
- @keyframes pulse {
-     0% { opacity: 1; }
-     50% { opacity: 0.7; }
-     100% { opacity: 1; }
- }
-
- /* Feedback box */
- .feedback-section {
-     background-color: #f8f9fa;
-     border: 1px solid #dee2e6;
-     padding: 1rem;
-     border-radius: 8px;
-     margin: 1rem 0;
- }
-
- /* Success and error messages */
- .success-message {
-     background-color: #d1e7dd;
-     color: #0f5132;
-     padding: 1rem;
-     border-radius: 6px;
-     border: 1px solid #badbcc;
- }
-
- .error-message {
-     background-color: #f8d7da;
-     color: #842029;
-     padding: 1rem;
-     border-radius: 6px;
-     border: 1px solid #f5c2c7;
- }
-
- /* Chat input */
- .stChatInput {
-     border-radius: 6px;
-     border: 1px solid #ced4da;
-     background: #ffffff;
- }
-
- /* Button */
- .stButton > button {
-     background-color: #0d6efd;
-     color: white;
-     border-radius: 6px;
-     padding: 0.5rem 1.25rem;
-     border: none;
-     font-weight: 600;
-     transition: background-color 0.2s ease;
- }
-
- .stButton > button:hover {
-     background-color: #0b5ed7;
- }
-
- /* Code details styling */
- .code-details {
-     background-color: #f8f9fa;
-     border: 1px solid #dee2e6;
-     border-radius: 8px;
-     padding: 10px;
-     margin-top: 10px;
- }
-
- /* Hide default menu and footer */
- #MainMenu {visibility: hidden;}
- footer {visibility: hidden;}
- header {visibility: hidden;}
-
- /* Auto scroll */
- .main-container {
-     height: 70vh;
-     overflow-y: auto;
- }
- </style>
- """, unsafe_allow_html=True)
-
- # Auto-scroll JavaScript
- st.markdown("""
- <script>
- function scrollToBottom() {
-     setTimeout(function() {
-         const mainContainer = document.querySelector('.main-container');
-         if (mainContainer) {
-             mainContainer.scrollTop = mainContainer.scrollHeight;
-         }
-         window.scrollTo(0, document.body.scrollHeight);
-     }, 100);
- }
- </script>
- """, unsafe_allow_html=True)
-
- # FORCE reload environment variables
- load_dotenv(override=True)
-
- # Get API keys
- Groq_Token = os.getenv("GROQ_API_KEY")
- hf_token = os.getenv("HF_TOKEN")
- gemini_token = os.getenv("GEMINI_TOKEN")
-
- models = {
-     "llama3.1": "llama-3.1-8b-instant",
-     "mistral": "mistral-saba-24b",
-     "llama3.3": "llama-3.3-70b-versatile",
-     "gemma": "gemma2-9b-it",
-     "gemini-pro": "gemini-1.5-pro",
- }
-
- self_path = os.path.dirname(os.path.abspath(__file__))
-
- # Beautiful header
- st.markdown("<h1 class='main-title'>🌬️ VayuBuddy</h1>", unsafe_allow_html=True)
-
- st.markdown("""
- <div class='subtitle'>
-     <strong>AI-Powered Air Quality Insights</strong><br>
-     Simplifying pollution analysis using conversational AI.
- </div>
- """, unsafe_allow_html=True)
-
- st.markdown("""
- <div class='instructions'>
-     <strong>How to Use:</strong><br>
-     Select a model from the sidebar and ask questions directly in the chat. Use quick prompts below for common queries.
- </div>
- """, unsafe_allow_html=True)
-
- os.environ["PANDASAI_API_KEY"] = "$2a$10$gbmqKotzJOnqa7iYOun8eO50TxMD/6Zw1pLI2JEoqncwsNx4XeBS2"
-
- # Load data with error handling
- try:
-     df = preprocess_and_load_df(join(self_path, "Data.csv"))
-     st.success("✅ Data loaded successfully!")
- except Exception as e:
-     st.error(f"❌ Error loading data: {e}")
-     st.stop()
-
- inference_server = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
- image_path = "IITGN_Logo.png"
-
- # Beautiful sidebar
- with st.sidebar:
-     # Logo and title
-     col1, col2, col3 = st.columns([1, 2, 1])
-     with col2:
-         if os.path.exists(image_path):
-             st.image(image_path, use_column_width=True)
-
-     # Model selection
-     st.markdown("### 🤖 AI Model Selection")
-
-     # Filter available models
-     available_models = []
-     if Groq_Token and Groq_Token.strip():
-         available_models.extend(["llama3.1", "llama3.3", "mistral", "gemma"])
-     if gemini_token and gemini_token.strip():
-         available_models.append("gemini-pro")
-
-     if not available_models:
-         st.error("❌ No API keys available! Please set up your API keys in the .env file")
-         st.stop()
-
-     model_name = st.selectbox(
-         "Choose your AI assistant:",
-         available_models,
-         help="Different models have different strengths. Try them all!"
-     )
-
-     # Model descriptions
-     model_descriptions = {
-         "llama3.1": "🦙 Fast and efficient for general queries",
-         "llama3.3": "🦙 Most advanced Llama model",
-         "mistral": "⚡ Balanced performance and speed",
-         "gemma": "💎 Google's lightweight model",
-         "gemini-pro": "🧠 Google's most powerful model"
-     }
-
-     if model_name in model_descriptions:
-         st.info(model_descriptions[model_name])
-
-     st.markdown("---")
-
-     # Clear Chat Button
-     if st.button("🧹 Clear Chat"):
-         st.session_state.responses = []
-         st.session_state.processing = False
-         try:
-             st.rerun()
-         except AttributeError:
-             st.experimental_rerun()
-
-     st.markdown("---")
-
-     # Chat History in Sidebar
-     with st.expander("📜 Chat History"):
-         for i, response in enumerate(st.session_state.get("responses", [])):
-             if response.get("role") == "user":
-                 st.markdown(f"**You:** {response.get('content', '')[:50]}...")
-             elif response.get("role") == "assistant":
-                 content = response.get('content', '')
-                 if isinstance(content, str) and len(content) > 50:
-                     st.markdown(f"**VayuBuddy:** {content[:50]}...")
-                 else:
-                     st.markdown(f"**VayuBuddy:** {str(content)[:50]}...")
-             st.markdown("---")
-
- # Load quick prompts
- questions = []
- questions_file = join(self_path, "questions.txt")
- if os.path.exists(questions_file):
-     try:
-         with open(questions_file, 'r', encoding='utf-8') as f:
-             content = f.read()
-             questions = [q.strip() for q in content.split("\n") if q.strip()]
-         print(f"Loaded {len(questions)} quick prompts")  # Debug
-     except Exception as e:
-         st.error(f"Error loading questions: {e}")
-         questions = []
-
- # Add some default prompts if file doesn't exist or is empty
- if not questions:
-     questions = [
-         "What is the average PM2.5 level in the dataset?",
-         "Show me the air quality trend over time",
-         "Which pollutant has the highest concentration?",
-         "Create a correlation plot between different pollutants",
-         "What are the peak pollution hours?",
-         "Compare weekday vs weekend pollution levels"
-     ]
-
- # Quick prompts section (horizontal)
- st.markdown("### 💭 Quick Prompts")
-
- # Create columns for horizontal layout
- cols_per_row = 2  # Reduced to 2 for better fit
- rows = [questions[i:i + cols_per_row] for i in range(0, len(questions), cols_per_row)]
-
- selected_prompt = None
- for row_idx, row in enumerate(rows):
-     cols = st.columns(len(row))
-     for col_idx, question in enumerate(row):
-         with cols[col_idx]:
-             # Create unique key using row and column indices
-             unique_key = f"prompt_btn_{row_idx}_{col_idx}"
-             button_text = f"📝 {question[:35]}{'...' if len(question) > 35 else ''}"
-
-             if st.button(button_text,
-                          key=unique_key,
-                          help=question,
-                          use_container_width=True):
-                 selected_prompt = question
-
- st.markdown("---")
-
- # Initialize chat history and processing state
- if "responses" not in st.session_state:
-     st.session_state.responses = []
- if "processing" not in st.session_state:
-     st.session_state.processing = False
-
- def upload_feedback():
-     try:
-         data = {
-             "feedback": feedback.get("score", ""),
-             "comment": feedback.get("text", ""),
-             "error": error,
-             "output": output,
-             "prompt": last_prompt,
-             "code": code,
-         }
-
-         random_folder_name = str(datetime.now()).replace(" ", "_").replace(":", "-").replace(".", "-")
-         save_path = f"/tmp/vayubuddy_feedback.md"
-         path_in_repo = f"data/{random_folder_name}/feedback.md"
-
-         with open(save_path, "w") as f:
-             template = f"""Prompt: {last_prompt}
-
- Output: {output}
-
- Code:
-
- ```py
- {code}
- ```
-
- Error: {error}
-
- Feedback: {feedback.get('score', '')}
-
- Comments: {feedback.get('text', '')}
- """
-             print(template, file=f)
-
-         if hf_token:
-             api = HfApi(token=hf_token)
-             api.upload_file(
-                 path_or_fileobj=save_path,
-                 path_in_repo=path_in_repo,
-                 repo_id="SustainabilityLabIITGN/VayuBuddy_Feedback",
-                 repo_type="dataset",
-             )
-             if status.get("is_image", False):
-                 api.upload_file(
-                     path_or_fileobj=output,
-                     path_in_repo=f"data/{random_folder_name}/plot.png",
-                     repo_id="SustainabilityLabIITGN/VayuBuddy_Feedback",
-                     repo_type="dataset",
-                 )
-             st.success("🎉 Feedback uploaded successfully!")
-         else:
-             st.warning("⚠️ Cannot upload feedback - HF_TOKEN not available")
-     except Exception as e:
-         st.error(f"❌ Error uploading feedback: {e}")
-
- def show_custom_response(response):
-     """Custom response display function"""
-     role = response.get("role", "assistant")
-     content = response.get("content", "")
-
-     if role == "user":
-         st.markdown(f"""
-         <div class='user-message'>
-             <div class='user-info'>You</div>
-             {content}
-         </div>
-         """, unsafe_allow_html=True)
-     elif role == "assistant":
-         st.markdown(f"""
-         <div class='assistant-message'>
-             <div class='assistant-info'>🤖 VayuBuddy</div>
-             {content if isinstance(content, str) else str(content)}
-         </div>
-         """, unsafe_allow_html=True)
-
-         # Show generated code if available
-         if response.get("gen_code"):
-             with st.expander("📋 View Generated Code"):
-                 st.code(response["gen_code"], language="python")
-
-         # Try to display image if content is a file path
-         try:
-             if isinstance(content, str) and (content.endswith('.png') or content.endswith('.jpg')):
-                 if os.path.exists(content):
-                     st.image(content)
-                     return {"is_image": True}
-         except:
-             pass
-
-     return {"is_image": False}
-
- def show_processing_indicator(model_name, question):
-     """Show processing indicator"""
-     st.markdown(f"""
-     <div class='processing-indicator'>
-         <div class='assistant-info'>🤖 VayuBuddy • Processing with {model_name}</div>
-         <strong>Question:</strong> {question}<br>
-         <em>🔄 Generating response...</em>
-     </div>
-     """, unsafe_allow_html=True)
-
- # Main chat container
- chat_container = st.container()
-
- with chat_container:
-     # Display chat history
-     for response_id, response in enumerate(st.session_state.responses):
-         status = show_custom_response(response)
-
-         # Show feedback section for assistant responses
-         if response["role"] == "assistant":
-             feedback_key = f"feedback_{int(response_id/2)}"
-             error = response.get("error", "No error information")
-             output = response.get("content", "No output")
-             last_prompt = response.get("last_prompt", "No prompt")
-             code = response.get("gen_code", "No code generated")
-
-             if "feedback" in st.session_state.responses[response_id]:
-                 st.markdown(f"""
-                 <div class='feedback-section'>
-                     <strong>📝 Your Feedback:</strong> {st.session_state.responses[response_id]["feedback"]}
-                 </div>
-                 """, unsafe_allow_html=True)
-             else:
-                 # Beautiful feedback section
-                 col1, col2 = st.columns(2)
-                 with col1:
-                     thumbs_up = st.button("👍 Helpful", key=f"{feedback_key}_up", use_container_width=True)
-                 with col2:
-                     thumbs_down = st.button("👎 Not Helpful", key=f"{feedback_key}_down", use_container_width=True)
-
-                 if thumbs_up or thumbs_down:
-                     thumbs = "👍" if thumbs_up else "👎"
-                     comments = st.text_area(
-                         "💬 Tell us more (optional):",
-                         key=f"{feedback_key}_comments",
-                         placeholder="What could be improved?"
-                     )
-                     feedback = {"score": thumbs, "text": comments}
-                     if st.button("🚀 Submit Feedback", key=f"{feedback_key}_submit"):
-                         upload_feedback()
-                         st.session_state.responses[response_id]["feedback"] = feedback
-                         st.rerun()
-
-     # Show processing indicator if processing
-     if st.session_state.get("processing"):
-         show_processing_indicator(
-             st.session_state.get("current_model", "Unknown"),
-             st.session_state.get("current_question", "Processing...")
-         )
-
- # Chat input (always visible at bottom)
- prompt = st.chat_input("💬 Ask me anything about air quality!", key="main_chat")
-
- # Handle selected prompt from quick prompts
- if selected_prompt:
-     prompt = selected_prompt
-
- # Handle new queries
- if prompt and not st.session_state.get("processing"):
-     # Prevent duplicate processing
-     if "last_prompt" in st.session_state:
-         last_prompt = st.session_state["last_prompt"]
-         last_model_name = st.session_state.get("last_model_name", "")
-         if (prompt == last_prompt) and (model_name == last_model_name):
-             prompt = None
-
-     if prompt:
-         # Add user input to chat history
-         user_response = get_from_user(prompt)
-         st.session_state.responses.append(user_response)
-
-         # Set processing state
-         st.session_state.processing = True
-         st.session_state.current_model = model_name
-         st.session_state.current_question = prompt
-
-         # Rerun to show processing indicator
-         st.rerun()
-
- # Process the question if we're in processing state
- if st.session_state.get("processing"):
-     prompt = st.session_state.get("current_question")
-     model_name = st.session_state.get("current_model")
-
-     try:
-         response = ask_question(model_name=model_name, question=prompt)
-
-         if not isinstance(response, dict):
-             response = {
-                 "role": "assistant",
-                 "content": "❌ Error: Invalid response format",
-                 "gen_code": "",
-                 "ex_code": "",
-                 "last_prompt": prompt,
-                 "error": "Invalid response format"
-             }
-
-         response.setdefault("role", "assistant")
-         response.setdefault("content", "No content generated")
-         response.setdefault("gen_code", "")
-         response.setdefault("ex_code", "")
-         response.setdefault("last_prompt", prompt)
-         response.setdefault("error", None)
-
-     except Exception as e:
-         response = {
-             "role": "assistant",
-             "content": f"Sorry, I encountered an error: {str(e)}",
-             "gen_code": "",
-             "ex_code": "",
-             "last_prompt": prompt,
-             "error": str(e)
-         }
-
-     st.session_state.responses.append(response)
-     st.session_state["last_prompt"] = prompt
-     st.session_state["last_model_name"] = model_name
-     st.session_state.processing = False
-
-     # Clear processing state
-     if "current_model" in st.session_state:
-         del st.session_state.current_model
-     if "current_question" in st.session_state:
-         del st.session_state.current_question
-
-     st.rerun()
-
- # Auto-scroll to bottom
- if st.session_state.responses:
-     st.markdown("<script>scrollToBottom();</script>", unsafe_allow_html=True)
-
- # Beautiful sidebar footer
- with st.sidebar:
-     st.markdown("---")
-     st.markdown("""
-     <div class='contact-section'>
-         <h4>📄 Paper on VayuBuddy</h4>
-         <p>Learn more about VayuBuddy in our <a href='https://arxiv.org/abs/2411.12760' target='_blank'>Research Paper</a>.</p>
-     </div>
-     """, unsafe_allow_html=True)
-
- # Footer
- st.markdown("""
- <div style='text-align: center; margin-top: 3rem; padding: 2rem; background: rgba(255,255,255,0.1); border-radius: 15px;'>
-     <h3>🌍 Together for Cleaner Air</h3>
-     <p>VayuBuddy - Empowering environmental awareness through AI</p>
-     <small>© 2024 IIT Gandhinagar Sustainability Lab</small>
- </div>
  """, unsafe_allow_html=True)
 
+ import streamlit as st
+ import os
+ import json
+ import pandas as pd
+ import random
+ from os.path import join
+ from datetime import datetime
+ from src import (
+     preprocess_and_load_df,
+     load_agent,
+     ask_agent,
+     decorate_with_code,
+     show_response,
+     get_from_user,
+     load_smart_df,
+     ask_question,
+ )
+ from dotenv import load_dotenv
+ from langchain_groq import ChatGroq
+ from langchain_google_genai import ChatGoogleGenerativeAI
+ from streamlit_feedback import streamlit_feedback
+ from huggingface_hub import HfApi
+ from datasets import load_dataset, get_dataset_config_info, Dataset
+ from PIL import Image
+ import time
+ import uuid
+
+ # Page config with beautiful theme
+ st.set_page_config(
+     page_title="VayuBuddy - AI Air Quality Assistant",
+     page_icon="🌬️",
+     layout="wide",
+     initial_sidebar_state="expanded"
+ )
+
+ # Custom CSS for beautiful styling
+ st.markdown("""
+ <style>
+ /* Clean app background */
+ .stApp {
+     background-color: #ffffff;
+     color: #212529;
+     font-family: 'Segoe UI', sans-serif;
+ }
+
+ /* Sidebar */
+ [data-testid="stSidebar"] {
+     background-color: #f8f9fa;
+     border-right: 1px solid #dee2e6;
+     padding: 1rem;
+ }
+
+ /* Main title */
+ .main-title {
+     text-align: center;
+     color: #343a40;
+     font-size: 2.5rem;
+     font-weight: 700;
+     margin-bottom: 0.5rem;
+ }
+
+ /* Subtitle */
+ .subtitle {
+     text-align: center;
+     color: #6c757d;
+     font-size: 1.1rem;
+     margin-bottom: 1.5rem;
+ }
+
+ /* Instructions */
+ .instructions {
+     background-color: #f1f3f5;
+     border-left: 4px solid #0d6efd;
+     padding: 1rem;
+     margin-bottom: 1.5rem;
+     border-radius: 6px;
+     color: #495057;
+     text-align: left;
+ }
+
+ /* Quick prompt buttons */
+ .quick-prompt-container {
+     display: flex;
+     flex-wrap: wrap;
+     gap: 8px;
+     margin-bottom: 1.5rem;
+     padding: 1rem;
+     background-color: #f8f9fa;
+     border-radius: 10px;
+     border: 1px solid #dee2e6;
+ }
+
+ .quick-prompt-btn {
+     background-color: #0d6efd;
+     color: white;
+     border: none;
+     padding: 8px 16px;
+     border-radius: 20px;
+     font-size: 0.9rem;
+     cursor: pointer;
+     transition: all 0.2s ease;
+     white-space: nowrap;
+ }
+
+ .quick-prompt-btn:hover {
+     background-color: #0b5ed7;
+     transform: translateY(-2px);
+ }
+
+ /* User message styling */
+ .user-message {
+     background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
+     color: white;
+     padding: 15px 20px;
+     border-radius: 20px 20px 5px 20px;
+     margin: 10px 0;
+     margin-left: auto;
+     margin-right: 0;
+     max-width: 80%;
+     position: relative;
+     box-shadow: 0 2px 10px rgba(0,0,0,0.1);
+ }
+
+ .user-info {
+     font-size: 0.8rem;
+     opacity: 0.8;
+     margin-bottom: 5px;
+     text-align: right;
+ }
+
+ /* Assistant message styling */
+ .assistant-message {
+     background: linear-gradient(135deg, #f093fb 0%, #f5576c 100%);
+     color: white;
+     padding: 15px 20px;
+     border-radius: 20px 20px 20px 5px;
+     margin: 10px 0;
+     margin-left: 0;
+     margin-right: auto;
+     max-width: 80%;
+     position: relative;
+     box-shadow: 0 2px 10px rgba(0,0,0,0.1);
+ }
+
+ .assistant-info {
+     font-size: 0.8rem;
+     opacity: 0.8;
+     margin-bottom: 5px;
+ }
+
+ /* Processing indicator */
+ .processing-indicator {
+     background: linear-gradient(135deg, #a8edea 0%, #fed6e3 100%);
+     color: #333;
+     padding: 15px 20px;
+     border-radius: 20px 20px 20px 5px;
+     margin: 10px 0;
+     margin-left: 0;
+     margin-right: auto;
+     max-width: 80%;
+     position: relative;
+     box-shadow: 0 2px 10px rgba(0,0,0,0.1);
+     animation: pulse 2s infinite;
+ }
+
+ @keyframes pulse {
+     0% { opacity: 1; }
+     50% { opacity: 0.7; }
+     100% { opacity: 1; }
+ }
+
+ /* Feedback box */
+ .feedback-section {
+     background-color: #f8f9fa;
+     border: 1px solid #dee2e6;
+     padding: 1rem;
+     border-radius: 8px;
+     margin: 1rem 0;
+ }
+
+ /* Success and error messages */
+ .success-message {
+     background-color: #d1e7dd;
+     color: #0f5132;
+     padding: 1rem;
+     border-radius: 6px;
+     border: 1px solid #badbcc;
+ }
+
+ .error-message {
+     background-color: #f8d7da;
+     color: #842029;
+     padding: 1rem;
+     border-radius: 6px;
+     border: 1px solid #f5c2c7;
+ }
+
+ /* Chat input */
+ .stChatInput {
+     border-radius: 6px;
+     border: 1px solid #ced4da;
+     background: #ffffff;
+ }
+
+ /* Button */
+ .stButton > button {
+     background-color: #0d6efd;
+     color: white;
+     border-radius: 6px;
+     padding: 0.5rem 1.25rem;
+     border: none;
+     font-weight: 600;
+     transition: background-color 0.2s ease;
+ }
+
+ .stButton > button:hover {
+     background-color: #0b5ed7;
+ }
+
+ /* Code details styling */
+ .code-details {
+     background-color: #f8f9fa;
+     border: 1px solid #dee2e6;
+     border-radius: 8px;
+     padding: 10px;
+     margin-top: 10px;
+ }
+
+ /* Hide default menu and footer */
+ #MainMenu {visibility: hidden;}
+ footer {visibility: hidden;}
+ header {visibility: hidden;}
+
+ /* Auto scroll */
+ .main-container {
+     height: 70vh;
+     overflow-y: auto;
+ }
+ </style>
+ """, unsafe_allow_html=True)
+
+ # Auto-scroll JavaScript
+ st.markdown("""
+ <script>
+ function scrollToBottom() {
+     setTimeout(function() {
+         const mainContainer = document.querySelector('.main-container');
+         if (mainContainer) {
+             mainContainer.scrollTop = mainContainer.scrollHeight;
+         }
+         window.scrollTo(0, document.body.scrollHeight);
+     }, 100);
+ }
+ </script>
+ """, unsafe_allow_html=True)
+
+ # FORCE reload environment variables
+ load_dotenv(override=True)
+
+ # Get API keys
+ Groq_Token = os.getenv("GROQ_API_KEY")
+ hf_token = os.getenv("HF_TOKEN")
+ gemini_token = os.getenv("GEMINI_TOKEN")
+
+ models = {
+     "llama3.1": "llama-3.1-8b-instant",
+     "mistral": "mistral-saba-24b",
+     "llama3.3": "llama-3.3-70b-versatile",
+     "gemma": "gemma2-9b-it",
+     "gemini-pro": "gemini-1.5-pro",
+ }
+
+ self_path = os.path.dirname(os.path.abspath(__file__))
+
+ # Initialize session ID for this session
+ if "session_id" not in st.session_state:
+     st.session_state.session_id = str(uuid.uuid4())
+
+ def upload_feedback(feedback, error, output, last_prompt, code, status):
+     """Enhanced feedback upload function with better logging and error handling"""
+     try:
+         if not hf_token or hf_token.strip() == "":
+             st.warning("⚠️ Cannot upload feedback - HF_TOKEN not available")
+             return False
+
+         # Create comprehensive feedback data
+         feedback_data = {
+             "timestamp": datetime.now().isoformat(),
+             "session_id": st.session_state.session_id,
+             "feedback_score": feedback.get("score", ""),
+             "feedback_comment": feedback.get("text", ""),
+             "user_prompt": last_prompt,
+             "ai_output": str(output),
+             "generated_code": code or "",
+             "error_message": error or "",
+             "is_image_output": status.get("is_image", False),
+             "success": not bool(error)
+         }
+
+         # Create unique folder name with timestamp
+         timestamp_str = datetime.now().strftime("%Y%m%d_%H%M%S")
+         random_id = str(uuid.uuid4())[:8]
+         folder_name = f"feedback_{timestamp_str}_{random_id}"
+
+         # Create markdown feedback file
+         markdown_content = f"""# VayuBuddy Feedback Report
+
+ ## Session Information
+ - **Timestamp**: {feedback_data['timestamp']}
+ - **Session ID**: {feedback_data['session_id']}
+
+ ## User Interaction
+ **Prompt**: {feedback_data['user_prompt']}
+
+ ## AI Response
+ **Output**: {feedback_data['ai_output']}
+
+ ## Generated Code
+ ```python
+ {feedback_data['generated_code']}
+ ```
+
+ ## Technical Details
+ - **Error Message**: {feedback_data['error_message']}
+ - **Is Image Output**: {feedback_data['is_image_output']}
+ - **Success**: {feedback_data['success']}
+
+ ## User Feedback
+ - **Score**: {feedback_data['feedback_score']}
+ - **Comments**: {feedback_data['feedback_comment']}
+ """
+
+         # Save markdown file locally
+         markdown_filename = f"{folder_name}.md"
+         markdown_local_path = f"/tmp/{markdown_filename}"
+
+         with open(markdown_local_path, "w", encoding="utf-8") as f:
+             f.write(markdown_content)
+
+         # Upload to Hugging Face
+         api = HfApi(token=hf_token)
+
+         # Upload markdown feedback
+         api.upload_file(
+             path_or_fileobj=markdown_local_path,
+             path_in_repo=f"data/{markdown_filename}",
+             repo_id="SustainabilityLabIITGN/VayuBuddy_Feedback",
+             repo_type="dataset",
+         )
+
+         # Upload image if it exists and is an image output
+         if status.get("is_image", False) and isinstance(output, str) and os.path.exists(output):
+             try:
+                 image_filename = f"{folder_name}_plot.png"
+                 api.upload_file(
+                     path_or_fileobj=output,
+                     path_in_repo=f"data/{image_filename}",
+                     repo_id="SustainabilityLabIITGN/VayuBuddy_Feedback",
+                     repo_type="dataset",
+                 )
+             except Exception as img_error:
+                 print(f"Error uploading image: {img_error}")
+
+         # Clean up local files
+         if os.path.exists(markdown_local_path):
+             os.remove(markdown_local_path)
+
+         st.success("🎉 Feedback uploaded successfully!")
+         return True
+
+     except Exception as e:
+         st.error(f"❌ Error uploading feedback: {e}")
+         print(f"Feedback upload error: {e}")
+         return False
+
+ # Beautiful header
+ st.markdown("<h1 class='main-title'>🌬️ VayuBuddy</h1>", unsafe_allow_html=True)
+
+ st.markdown("""
+ <div class='subtitle'>
+     <strong>AI-Powered Air Quality Insights</strong><br>
+     Simplifying pollution analysis using conversational AI.
+ </div>
+ """, unsafe_allow_html=True)
+
+ st.markdown("""
+ <div class='instructions'>
+     <strong>How to Use:</strong><br>
+     Select a model from the sidebar and ask questions directly in the chat. Use quick prompts below for common queries.
+ </div>
+ """, unsafe_allow_html=True)
+
+ os.environ["PANDASAI_API_KEY"] = "$2a$10$gbmqKotzJOnqa7iYOun8eO50TxMD/6Zw1pLI2JEoqncwsNx4XeBS2"
+
+ # Load data with error handling
+ try:
+     df = preprocess_and_load_df(join(self_path, "Data.csv"))
+     st.success("✅ Data loaded successfully!")
+ except Exception as e:
+     st.error(f"❌ Error loading data: {e}")
+     st.stop()
+
+ inference_server = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
+ image_path = "IITGN_Logo.png"
+
+ # Beautiful sidebar
+ with st.sidebar:
+     # Logo and title
+     col1, col2, col3 = st.columns([1, 2, 1])
+     with col2:
+         if os.path.exists(image_path):
+             st.image(image_path, use_column_width=True)
+
+     # Session info
+     st.markdown(f"**Session ID**: `{st.session_state.session_id[:8]}...`")
+
+     # Model selection
+     st.markdown("### 🤖 AI Model Selection")
+
+     # Filter available models
+     available_models = []
+     if Groq_Token and Groq_Token.strip():
+         available_models.extend(["llama3.1", "llama3.3", "mistral", "gemma"])
+     if gemini_token and gemini_token.strip():
+         available_models.append("gemini-pro")
+
+     if not available_models:
+         st.error("❌ No API keys available! Please set up your API keys in the .env file")
+         st.stop()
+
+     model_name = st.selectbox(
+         "Choose your AI assistant:",
+         available_models,
+         help="Different models have different strengths. Try them all!"
+     )
+
+     # Model descriptions
+     model_descriptions = {
+         "llama3.1": "🦙 Fast and efficient for general queries",
+         "llama3.3": "🦙 Most advanced Llama model",
+         "mistral": "⚡ Balanced performance and speed",
+         "gemma": "💎 Google's lightweight model",
+         "gemini-pro": "🧠 Google's most powerful model"
+     }
+
+     if model_name in model_descriptions:
+         st.info(model_descriptions[model_name])
+
+     st.markdown("---")
+
+     # Logging status
+     st.markdown("### 📊 Logging Status")
+     if hf_token and hf_token.strip():
+         st.success("✅ Logging enabled")
+         st.caption("Interactions are being logged to HuggingFace")
+     else:
+         st.warning("⚠️ Logging disabled")
+         st.caption("HF_TOKEN not available")
+
+     st.markdown("---")
+
+     # Clear Chat Button
+     if st.button("🧹 Clear Chat"):
+         st.session_state.responses = []
+         st.session_state.processing = False
+         # Generate new session ID for new chat
+         st.session_state.session_id = str(uuid.uuid4())
+         try:
+             st.rerun()
+         except AttributeError:
+             st.experimental_rerun()
+
+     st.markdown("---")
+
+     # Chat History in Sidebar
+     with st.expander("📜 Chat History"):
+         for i, response in enumerate(st.session_state.get("responses", [])):
+             if response.get("role") == "user":
+                 st.markdown(f"**You:** {response.get('content', '')[:50]}...")
+             elif response.get("role") == "assistant":
+                 content = response.get('content', '')
+                 if isinstance(content, str) and len(content) > 50:
+                     st.markdown(f"**VayuBuddy:** {content[:50]}...")
+                 else:
+                     st.markdown(f"**VayuBuddy:** {str(content)[:50]}...")
+             st.markdown("---")
+
+ # Load quick prompts
+ questions = []
+ questions_file = join(self_path, "questions.txt")
+ if os.path.exists(questions_file):
+     try:
+         with open(questions_file, 'r', encoding='utf-8') as f:
+             content = f.read()
+             questions = [q.strip() for q in content.split("\n") if q.strip()]
+         print(f"Loaded {len(questions)} quick prompts")  # Debug
+     except Exception as e:
+         st.error(f"Error loading questions: {e}")
+         questions = []
+
+ # Add some default prompts if file doesn't exist or is empty
+ if not questions:
+     questions = [
+         "What is the average PM2.5 level in the dataset?",
+         "Show me the air quality trend over time",
+         "Which pollutant has the highest concentration?",
+         "Create a correlation plot between different pollutants",
+         "What are the peak pollution hours?",
+         "Compare weekday vs weekend pollution levels"
+     ]
+
+ # Quick prompts section (horizontal)
+ st.markdown("### 💭 Quick Prompts")
+
+ # Create columns for horizontal layout
+ cols_per_row = 2  # Reduced to 2 for better fit
+ rows = [questions[i:i + cols_per_row] for i in range(0, len(questions), cols_per_row)]
+
+ selected_prompt = None
+ for row_idx, row in enumerate(rows):
+     cols = st.columns(len(row))
+     for col_idx, question in enumerate(row):
+         with cols[col_idx]:
+             # Create unique key using row and column indices
+             unique_key = f"prompt_btn_{row_idx}_{col_idx}"
+             button_text = f"📝 {question[:35]}{'...' if len(question) > 35 else ''}"
+
+             if st.button(button_text,
+                          key=unique_key,
+                          help=question,
+                          use_container_width=True):
+                 selected_prompt = question
+
+ st.markdown("---")
+
+ # Initialize chat history and processing state
+ if "responses" not in st.session_state:
+     st.session_state.responses = []
+ if "processing" not in st.session_state:
+     st.session_state.processing = False
+
+ def show_custom_response(response):
+     """Custom response display function"""
+     role = response.get("role", "assistant")
+     content = response.get("content", "")
+
+     if role == "user":
+         st.markdown(f"""
+         <div class='user-message'>
+             <div class='user-info'>You</div>
+             {content}
+         </div>
+         """, unsafe_allow_html=True)
+     elif role == "assistant":
+         st.markdown(f"""
+         <div class='assistant-message'>
+             <div class='assistant-info'>🤖 VayuBuddy</div>
+             {content if isinstance(content, str) else str(content)}
+         </div>
+         """, unsafe_allow_html=True)
+
+         # Show generated code if available
+         if response.get("gen_code"):
+             with st.expander("📋 View Generated Code"):
+                 st.code(response["gen_code"], language="python")
+
+         # Try to display image if content is a file path
+         try:
+             if isinstance(content, str) and (content.endswith('.png') or content.endswith('.jpg')):
+                 if os.path.exists(content):
+                     st.image(content)
+                     return {"is_image": True}
+         except:
+             pass
+
+     return {"is_image": False}
+
+ def show_processing_indicator(model_name, question):
+     """Show processing indicator"""
+     st.markdown(f"""
+     <div class='processing-indicator'>
+         <div class='assistant-info'>🤖 VayuBuddy • Processing with {model_name}</div>
+         <strong>Question:</strong> {question}<br>
+         <em>🔄 Generating response...</em>
+     </div>
+     """, unsafe_allow_html=True)
+
+ # Main chat container
+ chat_container = st.container()
+
+ with chat_container:
+     # Display chat history
+     for response_id, response in enumerate(st.session_state.responses):
+         status = show_custom_response(response)
+
+         # Show feedback section for assistant responses
+         if response["role"] == "assistant":
+             feedback_key = f"feedback_{int(response_id/2)}"
+             error = response.get("error", "")
+             output = response.get("content", "")
+             last_prompt = response.get("last_prompt", "")
+             code = response.get("gen_code", "")
+
+             if "feedback" in st.session_state.responses[response_id]:
+                 feedback_data = st.session_state.responses[response_id]["feedback"]
+                 st.markdown(f"""
+                 <div class='feedback-section'>
+                     <strong>📝 Your Feedback:</strong> {feedback_data.get('score', '')}
+                     {f"- {feedback_data.get('text', '')}" if feedback_data.get('text') else ""}
+                 </div>
+                 """, unsafe_allow_html=True)
+             else:
+                 # Beautiful feedback section
+                 st.markdown("---")
+                 st.markdown("**How was this response?**")
+
+                 col1, col2 = st.columns(2)
+                 with col1:
+                     thumbs_up = st.button("👍 Helpful", key=f"{feedback_key}_up", use_container_width=True)
+                 with col2:
+                     thumbs_down = st.button("👎 Not Helpful", key=f"{feedback_key}_down", use_container_width=True)
+
+                 if thumbs_up or thumbs_down:
+                     thumbs = "👍 Helpful" if thumbs_up else "👎 Not Helpful"
+                     comments = st.text_area(
+                         "💬 Tell us more (optional):",
+                         key=f"{feedback_key}_comments",
+                         placeholder="What could be improved? Any suggestions?",
+                         max_chars=500
+                     )
+
+                     if st.button("🚀 Submit Feedback", key=f"{feedback_key}_submit"):
+                         feedback = {"score": thumbs, "text": comments}
+
+                         # Upload feedback with enhanced error handling
+                         if upload_feedback(feedback, error, output, last_prompt, code, status or {}):
+                             st.session_state.responses[response_id]["feedback"] = feedback
+                             time.sleep(1)  # Give user time to see success message
+                             st.rerun()
+                         else:
+                             st.error("Failed to submit feedback. Please try again.")
+
+     # Show processing indicator if processing
+     if st.session_state.get("processing"):
+         show_processing_indicator(
+             st.session_state.get("current_model", "Unknown"),
+             st.session_state.get("current_question", "Processing...")
+         )
+
+ # Chat input (always visible at bottom)
+ prompt = st.chat_input("💬 Ask me anything about air quality!", key="main_chat")
+
+ # Handle selected prompt from quick prompts
+ if selected_prompt:
+     prompt = selected_prompt
+
+ # Handle new queries
+ if prompt and not st.session_state.get("processing"):
+     # Prevent duplicate processing
+     if "last_prompt" in st.session_state:
+         last_prompt = st.session_state["last_prompt"]
+         last_model_name = st.session_state.get("last_model_name", "")
+         if (prompt == last_prompt) and (model_name == last_model_name):
+             prompt = None
+
+     if prompt:
+         # Add user input to chat history
+         user_response = get_from_user(prompt)
+         st.session_state.responses.append(user_response)
+
+         # Set processing state
+         st.session_state.processing = True
+         st.session_state.current_model = model_name
+         st.session_state.current_question = prompt
+
+         # Rerun to show processing indicator
+         st.rerun()
+
+ # Process the question if we're in processing state
+ if st.session_state.get("processing"):
+     prompt = st.session_state.get("current_question")
+     model_name = st.session_state.get("current_model")
+
+     try:
+         response = ask_question(model_name=model_name, question=prompt)
+
+         if not isinstance(response, dict):
+             response = {
+                 "role": "assistant",
+                 "content": "❌ Error: Invalid response format",
+                 "gen_code": "",
+                 "ex_code": "",
+                 "last_prompt": prompt,
+                 "error": "Invalid response format"
+             }
+
+         response.setdefault("role", "assistant")
+         response.setdefault("content", "No content generated")
+         response.setdefault("gen_code", "")
+         response.setdefault("ex_code", "")
+         response.setdefault("last_prompt", prompt)
+         response.setdefault("error", None)
+
+     except Exception as e:
+         response = {
+             "role": "assistant",
+             "content": f"Sorry, I encountered an error: {str(e)}",
+             "gen_code": "",
+             "ex_code": "",
+             "last_prompt": prompt,
+             "error": str(e)
+         }
+
+     st.session_state.responses.append(response)
+     st.session_state["last_prompt"] = prompt
+     st.session_state["last_model_name"] = model_name
+     st.session_state.processing = False
+
+     # Clear processing state
+     if "current_model" in st.session_state:
+         del st.session_state.current_model
+     if "current_question" in st.session_state:
+         del st.session_state.current_question
+
+     st.rerun()
+
+ # Auto-scroll to bottom
+ if st.session_state.responses:
+     st.markdown("<script>scrollToBottom();</script>", unsafe_allow_html=True)
+
+ # Beautiful sidebar footer
+ with st.sidebar:
+     st.markdown("---")
+     st.markdown("""
+     <div class='contact-section'>
+         <h4>📄 Paper on VayuBuddy</h4>
+         <p>Learn more about VayuBuddy in our <a href='https://arxiv.org/abs/2411.12760' target='_blank'>Research Paper</a>.</p>
+     </div>
+     """, unsafe_allow_html=True)
+
+     # Statistics (if logging is enabled)
+     if hf_token and hf_token.strip():
+         st.markdown("### 📈 Session Stats")
+         total_interactions = len([r for r in st.session_state.get("responses", []) if r.get("role") == "assistant"])
+         st.metric("Interactions", total_interactions)
+
+         feedbacks_given = len([r for r in st.session_state.get("responses", []) if r.get("role") == "assistant" and "feedback" in r])
+         st.metric("Feedbacks Given", feedbacks_given)
+
+ # Footer
+ st.markdown("""
+ <div style='text-align: center; margin-top: 3rem; padding: 2rem; background: rgba(255,255,255,0.1); border-radius: 15px;'>
+     <h3>🌍 Together for Cleaner Air</h3>
+     <p>VayuBuddy - Empowering environmental awareness through AI</p>
+     <small>© 2024 IIT Gandhinagar Sustainability Lab</small>
+ </div>
  """, unsafe_allow_html=True)