Vedant-acharya commited on
Commit
bc17742
·
verified ·
1 Parent(s): 77dc76a

Made header responsive

Browse files
Files changed (1) hide show
  1. app.py +961 -940
app.py CHANGED
@@ -36,1050 +36,1071 @@ st.set_page_config(
36
  initial_sidebar_state="expanded"
37
  )
38
 
39
- # Custom CSS for beautiful styling
40
- st.markdown("""
41
- <style>
42
- /* Clean app background */
43
- .stApp {
44
- background-color: #ffffff;
45
- color: #212529;
46
- font-family: 'Segoe UI', sans-serif;
47
- }
 
 
 
48
 
49
- /* Reduce main container padding */
50
- .main .block-container {
51
- padding-top: 0.5rem;
52
- padding-bottom: 3rem;
53
- max-width: 100%;
54
- }
 
 
 
 
 
 
 
 
55
 
56
- /* Remove excessive spacing */
57
- .element-container {
58
- margin-bottom: 0.5rem !important;
59
- }
60
 
61
- /* Fix sidebar spacing */
62
- [data-testid="stSidebar"] .element-container {
63
- margin-bottom: 0.25rem !important;
64
- }
65
 
66
- /* Sidebar */
67
- [data-testid="stSidebar"] {
68
- background-color: #f8f9fa;
69
- border-right: 1px solid #dee2e6;
70
- padding: 1rem;
 
 
 
 
 
 
 
 
 
 
71
  }
72
 
73
- /* Optimize sidebar scrolling */
74
- [data-testid="stSidebar"] > div:first-child {
75
- height: 100vh;
76
- overflow-y: auto;
77
- padding-bottom: 2rem;
78
- }
79
 
80
- [data-testid="stSidebar"]::-webkit-scrollbar {
81
- width: 6px;
82
- }
83
 
84
- [data-testid="stSidebar"]::-webkit-scrollbar-track {
85
- background: #f1f1f1;
86
- border-radius: 3px;
87
- }
 
 
88
 
89
- [data-testid="stSidebar"]::-webkit-scrollbar-thumb {
90
- background: #c1c1c1;
91
- border-radius: 3px;
92
- }
 
 
 
 
 
 
 
 
 
93
 
94
- [data-testid="stSidebar"]::-webkit-scrollbar-thumb:hover {
95
- background: #a1a1a1;
96
- }
 
 
 
 
97
 
98
- /* Main title */
99
- .main-title {
100
- text-align: center;
101
- color: #343a40;
102
- font-size: 2.5rem;
103
- font-weight: 700;
104
- margin-bottom: 0.5rem;
105
- }
106
 
107
- /* Subtitle */
108
- .subtitle {
109
- text-align: center;
110
- color: #6c757d;
111
- font-size: 1.1rem;
112
- margin-bottom: 1.5rem;
113
- }
114
 
115
- /* Instructions */
116
- .instructions {
117
- background-color: #f1f3f5;
118
- border-left: 4px solid #0d6efd;
119
- padding: 1rem;
120
- margin-bottom: 1.5rem;
121
- border-radius: 6px;
122
- color: #495057;
123
- text-align: left;
124
- }
125
 
126
- /* Quick prompt buttons */
127
- .quick-prompt-container {
128
- display: flex;
129
- flex-wrap: wrap;
130
- gap: 8px;
131
- margin-bottom: 1.5rem;
132
- padding: 1rem;
133
- background-color: #f8f9fa;
134
- border-radius: 10px;
135
- border: 1px solid #dee2e6;
136
- }
137
 
138
- .quick-prompt-btn {
139
- background-color: #0d6efd;
140
- color: white;
141
- border: none;
142
- padding: 8px 16px;
143
- border-radius: 20px;
144
- font-size: 0.9rem;
145
- cursor: pointer;
146
- transition: all 0.2s ease;
147
- white-space: nowrap;
148
- }
149
 
150
- .quick-prompt-btn:hover {
151
- background-color: #0b5ed7;
152
- transform: translateY(-2px);
153
- }
154
 
155
- /* User message styling */
156
- .user-message {
157
- background: #3b82f6;
158
- color: white;
159
- padding: 0.75rem 1rem;
160
- border-radius: 7px;
161
- max-width: 95%;
162
- }
163
 
164
- .user-info {
165
- font-size: 0.875rem;
166
- opacity: 0.9;
167
- margin-bottom: 3px;
168
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
169
 
170
- /* Assistant message styling */
171
- .assistant-message {
172
- background: #f1f5f9;
173
- color: #334155;
174
- padding: 0.75rem 1rem;
175
- border-radius: 12px;
176
- max-width: 85%;
177
- }
 
 
 
 
 
 
178
 
179
- .assistant-info {
180
- font-size: 0.875rem;
181
- color: #6b7280;
182
- margin-bottom: 5px;
183
- }
184
 
185
- /* Processing indicator */
186
- .processing-indicator {
187
- background: linear-gradient(135deg, #a8edea 0%, #fed6e3 100%);
188
- color: #333;
189
- padding: 1rem 1.5rem;
190
- border-radius: 12px;
191
- margin: 1rem 0;
192
- margin-left: 0;
193
- margin-right: auto;
194
- max-width: 70%;
195
- box-shadow: 0 2px 10px rgba(0,0,0,0.1);
196
- animation: pulse 2s infinite;
197
- }
198
 
199
- @keyframes pulse {
200
- 0% { opacity: 1; }
201
- 50% { opacity: 0.7; }
202
- 100% { opacity: 1; }
 
 
 
 
 
203
  }
204
 
205
- /* Feedback box */
206
- .feedback-section {
207
- background-color: #f8f9fa;
208
- border: 1px solid #dee2e6;
209
- padding: 1rem;
210
- border-radius: 8px;
211
- margin: 1rem 0;
212
  }
213
 
214
- /* Success and error messages */
215
- .success-message {
216
- background-color: #d1e7dd;
217
- color: #0f5132;
218
- padding: 1rem;
219
- border-radius: 6px;
220
- border: 1px solid #badbcc;
221
  }
222
 
223
- .error-message {
224
- background-color: #f8d7da;
225
- color: #842029;
226
- padding: 1rem;
227
- border-radius: 6px;
228
- border: 1px solid #f5c2c7;
 
 
 
 
 
 
 
 
 
229
  }
 
 
 
 
 
 
 
 
 
230
 
231
- /* Chat input styling - Fixed alignment */
232
- # .stChatInput {
233
- # border-radius: 12px !important;
234
- # border: 2px solid #e5e7eb !important;
235
- # background: #ffffff !important;
236
- # padding: 0.75rem 1rem !important;
237
- # font-size: 1rem !important;
238
- # width: 100% !important;
239
- # max-width: 70% !important;
240
- # margin: 0 !important;
241
- # box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1) !important;
242
- # transition: all 0.2s ease !important;
243
- # }
244
 
245
- # .stChatInput:focus {
246
- # border-color: #3b82f6 !important;
247
- # box-shadow: 0 0 0 3px rgba(59, 130, 246, 0.1) !important;
248
- # outline: none !important;
249
- # }
 
250
 
251
- /* Chat input container */
252
- .stChatInput > div {
253
- padding: 0 !important;
254
- margin: 0 !important;
255
- }
256
 
257
- /* Chat input text area */
258
- # .stChatInput textarea {
259
- # border: none !important;
260
- # background: transparent !important;
261
- # padding: 0 !important;
262
- # margin: 0 !important;
263
- # font-size: 1rem !important;
264
- # line-height: 1.5 !important;
265
- # resize: none !important;
266
- # outline: none !important;
267
- # }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
268
 
269
- /* Chat input placeholder */
270
- # .stChatInput textarea::placeholder {
271
- # color: #9ca3af !important;
272
- # font-style: normal !important;
273
- # }
 
 
274
 
275
- .st-emotion-cache-f4ro0r {
276
- align-items = center;
277
- }
278
 
279
- /* Fix the main chat input container alignment */
280
- [data-testid="stChatInput"] {
281
- position: fixed !important;
282
- bottom: 0.5rem !important;
283
- left: 6rem !important;
284
- right: 0 !important;
285
- background: #ffffff !important;
286
- width: 65% !important;
287
- box-shadow: 0 -2px 10px rgba(0, 0, 0, 0.1) !important;
288
- }
289
 
290
- /* Adjust main content to account for fixed chat input */
291
- .main .block-container {
292
- padding-bottom: 100px !important;
293
- }
294
 
295
- /* Chat input button styling */
296
- [data-testid="stChatInput"] button {
297
- background: #3b82f6 !important;
298
- color: white !important;
299
- border: none !important;
300
- border-radius: 12px !important;
301
- font-weight: 600 !important;
302
- transition: background-color 0.2s ease !important;
303
- }
304
-
305
- [data-testid="stChatInput"] button:hover {
306
- background: #2563eb !important;
307
- }
308
-
309
- /* Textarea inside chat input */
310
- [data-testid="stChatInput"] [data-baseweb="textarea"] {
311
- border: 2px solid #3b82f6 !important;
312
- border-radius: 12px !important;
313
- font-size: 16px !important;
314
- color: #111 !important;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
315
 
316
- width: 100% !important; /* fill the parent container */
317
- box-sizing: border-box !important;
318
- }
319
 
320
- /* Ensure proper spacing from sidebar */
321
- @media (min-width: 768px) {
322
- [data-testid="stChatInput"] {
323
- margin-left: 21rem !important; /* Account for sidebar width */
324
- }
325
- }
 
 
 
 
 
 
326
 
327
- /* Code container styling */
328
- .code-container {
329
- margin: 1rem 0;
330
- border: 1px solid #d1d5db;
331
- border-radius: 12px;
332
- background: white;
333
- box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
334
- }
335
 
336
- .code-header {
337
- display: flex;
338
- justify-content: space-between;
339
- align-items: center;
340
- padding: 0.875rem 1.25rem;
341
- background: linear-gradient(135deg, #f8fafc 0%, #f1f5f9 100%);
342
- border-bottom: 1px solid #e2e8f0;
343
- cursor: pointer;
344
- transition: all 0.2s ease;
345
- border-radius: 12px 12px 0 0;
346
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
347
 
348
- .code-header:hover {
349
- background: linear-gradient(135deg, #e2e8f0 0%, #cbd5e1 100%);
350
- }
351
 
352
- .code-title {
353
- font-size: 0.9rem;
354
- font-weight: 600;
355
- color: #1e293b;
356
- display: flex;
357
- align-items: center;
358
- gap: 0.5rem;
359
- }
360
 
361
- .code-title:before {
362
- content: "";
363
- font-size: 0.8rem;
364
- }
365
 
366
- .toggle-text {
367
- font-size: 0.75rem;
368
- color: #64748b;
369
- font-weight: 500;
370
- }
 
 
 
371
 
372
- .code-block {
373
- background: linear-gradient(135deg, #0f172a 0%, #1e293b 100%);
374
- color: #e2e8f0;
375
- padding: 1.5rem;
376
- font-family: 'SF Mono', 'Monaco', 'Menlo', 'Consolas', monospace;
377
- font-size: 0.875rem;
378
- overflow-x: auto;
379
- line-height: 1.6;
380
- border-radius: 0 0 12px 12px;
381
- }
 
 
382
 
383
- .answer-container {
384
- background: #f8fafc;
385
- border: 1px solid #e2e8f0;
386
- border-radius: 8px;
387
- padding: 1.5rem;
388
- margin: 1rem 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
389
  }
390
 
391
- .answer-text {
392
- font-size: 1.125rem;
393
- color: #1e293b;
394
- line-height: 1.6;
395
- margin-bottom: 1rem;
396
  }
397
 
398
- .answer-highlight {
399
- background: #fef3c7;
400
- padding: 0.125rem 0.375rem;
401
- border-radius: 4px;
402
- font-weight: 600;
403
- color: #92400e;
404
  }
405
 
406
- .context-info {
407
- background: #f1f5f9;
408
- border-left: 4px solid #3b82f6;
409
- padding: 0.75rem 1rem;
410
- margin: 1rem 0;
411
- font-size: 0.875rem;
412
- color: #475569;
413
  }
414
 
415
- /* Hide default menu and footer */
416
- #MainMenu {visibility: hidden;}
417
- footer {visibility: hidden;}
418
- header {visibility: hidden;}
 
 
419
 
420
- /* Auto scroll */
421
- .main-container {
422
- height: 70vh;
423
  overflow-y: auto;
 
424
  }
425
- </style>
426
- """, unsafe_allow_html=True)
427
 
428
- # JavaScript for interactions
429
- st.markdown("""
430
- <script>
431
- function scrollToBottom() {
432
- setTimeout(function() {
433
- const mainContainer = document.querySelector('.main-container');
434
- if (mainContainer) {
435
- mainContainer.scrollTop = mainContainer.scrollHeight;
436
- }
437
- window.scrollTo(0, document.body.scrollHeight);
438
- }, 100);
439
  }
440
 
441
- function toggleCode(header) {
442
- const codeBlock = header.nextElementSibling;
443
- const toggleText = header.querySelector('.toggle-text');
444
-
445
- if (codeBlock.style.display === 'none') {
446
- codeBlock.style.display = 'block';
447
- toggleText.textContent = 'Click to collapse';
448
- } else {
449
- codeBlock.style.display = 'none';
450
- toggleText.textContent = 'Click to expand';
451
- }
452
  }
453
- </script>
454
- """, unsafe_allow_html=True)
455
 
456
- # FORCE reload environment variables
457
- load_dotenv(override=True)
 
 
458
 
459
- # Get API keys
460
- Groq_Token = os.getenv("GROQ_API_KEY")
461
- hf_token = os.getenv("HF_TOKEN")
462
- gemini_token = os.getenv("GEMINI_TOKEN")
463
 
464
- # Model order is decided by this
465
- models = {
466
- "gpt-oss-120b": "openai/gpt-oss-120b",
467
- "qwen3-32b": "qwen/qwen3-32b",
468
- "gpt-oss-20b": "openai/gpt-oss-20b",
469
- "llama4 maverik":"meta-llama/llama-4-maverick-17b-128e-instruct",
470
- "llama3.3": "llama-3.3-70b-versatile",
471
- "deepseek-R1": "deepseek-r1-distill-llama-70b",
472
- "gemini-2.5-flash": "gemini-2.5-flash",
473
- "gemini-2.5-pro": "gemini-2.5-pro",
474
- "gemini-2.5-flash-lite": "gemini-2.5-flash-lite",
475
- "gemini-2.0-flash": "gemini-2.0-flash",
476
- "gemini-2.0-flash-lite": "gemini-2.0-flash-lite",
477
- # "llama4 scout":"meta-llama/llama-4-scout-17b-16e-instruct"
478
- # "llama3.1": "llama-3.1-8b-instant"
479
  }
480
 
481
- self_path = os.path.dirname(os.path.abspath(__file__))
 
 
 
 
 
 
482
 
483
- # Initialize session ID for this session
484
- if "session_id" not in st.session_state:
485
- st.session_state.session_id = str(uuid.uuid4())
 
 
 
 
 
 
 
486
 
487
- def upload_feedback(feedback, error, output, last_prompt, code, status):
488
- """Enhanced feedback upload function with better logging and error handling"""
489
- try:
490
- if not hf_token or hf_token.strip() == "":
491
- st.warning("Cannot upload feedback - HF_TOKEN not available")
492
- return False
 
 
 
 
 
493
 
494
- # Create comprehensive feedback data
495
- feedback_data = {
496
- "timestamp": datetime.now().isoformat(),
497
- "session_id": st.session_state.session_id,
498
- "feedback_score": feedback.get("score", ""),
499
- "feedback_comment": feedback.get("text", ""),
500
- "user_prompt": last_prompt,
501
- "ai_output": str(output),
502
- "generated_code": code or "",
503
- "error_message": error or "",
504
- "is_image_output": status.get("is_image", False),
505
- "success": not bool(error)
506
- }
507
-
508
- # Create unique folder name with timestamp
509
- timestamp_str = datetime.now().strftime("%Y%m%d_%H%M%S")
510
- random_id = str(uuid.uuid4())[:8]
511
- folder_name = f"feedback_{timestamp_str}_{random_id}"
512
-
513
- # Create markdown feedback file
514
- markdown_content = f"""# VayuChat Feedback Report
515
 
516
- ## Session Information
517
- - **Timestamp**: {feedback_data['timestamp']}
518
- - **Session ID**: {feedback_data['session_id']}
 
519
 
520
- ## User Interaction
521
- **Prompt**: {feedback_data['user_prompt']}
 
 
 
 
 
 
522
 
523
- ## AI Response
524
- **Output**: {feedback_data['ai_output']}
 
 
 
525
 
526
- ## Generated Code
527
- ```python
528
- {feedback_data['generated_code']}
529
- ```
 
 
 
 
530
 
531
- ## Technical Details
532
- - **Error Message**: {feedback_data['error_message']}
533
- - **Is Image Output**: {feedback_data['is_image_output']}
534
- - **Success**: {feedback_data['success']}
 
535
 
536
- ## User Feedback
537
- - **Score**: {feedback_data['feedback_score']}
538
- - **Comments**: {feedback_data['feedback_comment']}
539
- """
 
 
 
 
 
 
 
 
 
540
 
541
- # Save markdown file locally
542
- markdown_filename = f"{folder_name}.md"
543
- markdown_local_path = f"/tmp/{markdown_filename}"
544
-
545
- with open(markdown_local_path, "w", encoding="utf-8") as f:
546
- f.write(markdown_content)
547
 
548
- # Upload to Hugging Face
549
- api = HfApi(token=hf_token)
550
-
551
- # Upload markdown feedback
552
- api.upload_file(
553
- path_or_fileobj=markdown_local_path,
554
- path_in_repo=f"data/{markdown_filename}",
555
- repo_id="SustainabilityLabIITGN/VayuChat_Feedback",
556
- repo_type="dataset",
557
- )
558
-
559
- # Upload image if it exists and is an image output
560
- if status.get("is_image", False) and isinstance(output, str) and os.path.exists(output):
561
- try:
562
- image_filename = f"{folder_name}_plot.png"
563
- api.upload_file(
564
- path_or_fileobj=output,
565
- path_in_repo=f"data/{image_filename}",
566
- repo_id="SustainabilityLabIITGN/VayuChat_Feedback",
567
- repo_type="dataset",
568
- )
569
- except Exception as img_error:
570
- print(f"Error uploading image: {img_error}")
571
-
572
- # Clean up local files
573
- if os.path.exists(markdown_local_path):
574
- os.remove(markdown_local_path)
575
-
576
- st.success("Feedback uploaded successfully!")
577
- return True
578
-
579
- except Exception as e:
580
- st.error(f"Error uploading feedback: {e}")
581
- print(f"Feedback upload error: {e}")
582
- return False
583
 
584
- # Filter available models
585
- available_models = []
586
- model_names = list(models.keys())
587
- groq_models = []
588
- gemini_models = []
589
- for model_name in model_names:
590
- if "gemini" not in model_name:
591
- groq_models.append(model_name)
592
- else:
593
- gemini_models.append(model_name)
594
- if Groq_Token and Groq_Token.strip():
595
- available_models.extend(groq_models)
596
- if gemini_token and gemini_token.strip():
597
- available_models.extend(gemini_models)
598
 
599
- if not available_models:
600
- st.error("No API keys available! Please set up your API keys in the .env file")
601
- st.stop()
 
 
 
 
602
 
603
- # Set GPT-OSS-120B as default if available
604
- default_index = 0
605
- if "gpt-oss-120b" in available_models:
606
- default_index = available_models.index("gpt-oss-120b")
607
- elif "deepseek-R1" in available_models:
608
- default_index = available_models.index("deepseek-R1")
 
 
 
 
 
 
 
609
 
610
- # Compact header - everything perfectly aligned at same height
 
 
 
 
611
 
612
- st.markdown("""
613
- <div style='
614
- display: flex;
615
- align-items: center;
616
- justify-content: center;
617
- padding: 0.5rem 0;
618
- gap: 12px;
619
- border-bottom: 1px solid #e5e7eb;
620
- margin-bottom: 1rem;
621
- '>
622
- <img src='https://sustainability-lab.github.io/images/logo_light.svg'
623
- style='height: 80px;' />
624
- <div style='display: flex; flex-direction: column; line-height: 1.2;'>
625
- <h1 style='
626
- margin: 0;
627
- font-size: 1.5rem;
628
- font-weight: 700;
629
- color: #2563eb;
630
- '>VayuChat</h1>
631
- <span style='
632
- font-size: 0.85rem;
633
- color: #6b7280;
634
- font-weight: 500;
635
- '>AI Air Quality Analysis • Sustainability Lab, IIT Gandhinagar</span>
636
- </div>
637
- </div>
638
- """, unsafe_allow_html=True)
639
 
640
- # Load data with caching for better performance
641
- @st.cache_data
642
- def load_data():
643
- return preprocess_and_load_df(join(self_path, "Data.csv"))
 
 
 
 
 
 
 
644
 
645
- try:
646
- df = load_data()
647
- # Data loaded silently - no success message needed
648
- except Exception as e:
649
- st.error(f"Error loading data: {e}")
650
- st.stop()
651
 
652
- inference_server = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
653
- image_path = "IITGN_Logo.png"
 
654
 
655
- # Clean sidebar
656
- with st.sidebar:
657
- # Model selector at top of sidebar for easy access
658
- model_name = st.selectbox(
659
- "🤖 AI Model:",
660
- available_models,
661
- index=default_index,
662
- help="Choose your AI model - easily accessible without scrolling!"
663
- )
664
-
665
- st.markdown("---")
666
-
667
- # Quick Queries Section
668
- st.markdown("### 💭 Quick Queries")
669
-
670
- # Load quick prompts with caching
671
- @st.cache_data
672
- def load_questions():
673
- questions = []
674
- questions_file = join(self_path, "questions.txt")
675
- if os.path.exists(questions_file):
676
- try:
677
- with open(questions_file, 'r', encoding='utf-8') as f:
678
- content = f.read()
679
- questions = [q.strip() for q in content.split("\n") if q.strip()]
680
- except Exception as e:
681
- questions = []
682
- return questions
683
-
684
- questions = load_questions()
685
-
686
- # Add default prompts if file doesn't exist or is empty
687
- if not questions:
688
- questions = [
689
- "Which month had highest pollution?",
690
- "Which city has worst air quality?",
691
- "Show annual PM2.5 average",
692
- "Plot monthly average PM2.5 for 2023",
693
- "List all cities by pollution level",
694
- "Compare winter vs summer pollution",
695
- "Show seasonal pollution patterns",
696
- "Which areas exceed WHO guidelines?",
697
- "What are peak pollution hours?",
698
- "Show PM10 vs PM2.5 comparison",
699
- "Which station records highest variability in PM2.5?",
700
- "Calculate pollution improvement rate year-over-year by city",
701
- "Identify cities with PM2.5 levels consistently above 50 μg/m³ for >6 months",
702
- "Find correlation between PM2.5 and PM10 across different seasons and cities",
703
- "Compare weekday vs weekend levels",
704
- "Plot yearly trend analysis",
705
- "Show pollution distribution by city",
706
- "Create correlation plot between pollutants"
707
- ]
708
-
709
- # Quick query buttons in sidebar
710
- selected_prompt = None
711
-
712
-
713
- # Show all questions but in a scrollable format
714
- if len(questions) > 0:
715
- st.markdown("**Select a question to analyze:**")
716
-
717
- # Getting Started section with simple questions
718
- getting_started_questions = questions[:10] # First 10 simple questions
719
- with st.expander("🚀 Getting Started - Simple Questions", expanded=True):
720
- for i, q in enumerate(getting_started_questions):
721
- if st.button(q, key=f"start_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
722
- selected_prompt = q
723
- st.session_state.last_selected_prompt = q
724
-
725
- # Create expandable sections for better organization
726
- with st.expander("📊 NCAP Funding & Policy Analysis", expanded=False):
727
- for i, q in enumerate([q for q in questions if any(word in q.lower() for word in ['ncap', 'funding', 'investment', 'rupee'])]):
728
- if st.button(q, key=f"ncap_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
729
- selected_prompt = q
730
- st.session_state.last_selected_prompt = q
731
-
732
- with st.expander("🌬️ Meteorology & Environmental Factors", expanded=False):
733
- for i, q in enumerate([q for q in questions if any(word in q.lower() for word in ['wind', 'temperature', 'humidity', 'rainfall', 'meteorological', 'monsoon', 'barometric'])]):
734
- if st.button(q, key=f"met_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
735
- selected_prompt = q
736
- st.session_state.last_selected_prompt = q
737
-
738
- with st.expander("👥 Population & Demographics", expanded=False):
739
- for i, q in enumerate([q for q in questions if any(word in q.lower() for word in ['population', 'capita', 'density', 'exposure'])]):
740
- if st.button(q, key=f"pop_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
741
- selected_prompt = q
742
- st.session_state.last_selected_prompt = q
743
-
744
- with st.expander("🏭 Multi-Pollutant Analysis", expanded=False):
745
- for i, q in enumerate([q for q in questions if any(word in q.lower() for word in ['ozone', 'no2', 'correlation', 'multi-pollutant', 'interaction'])]):
746
- if st.button(q, key=f"multi_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
747
- selected_prompt = q
748
- st.session_state.last_selected_prompt = q
749
-
750
- with st.expander("📈 Other Analysis Questions", expanded=False):
751
- remaining_questions = [q for q in questions if not any(any(word in q.lower() for word in category) for category in [
752
- ['ncap', 'funding', 'investment', 'rupee'],
753
- ['wind', 'temperature', 'humidity', 'rainfall', 'meteorological', 'monsoon', 'barometric'],
754
- ['population', 'capita', 'density', 'exposure'],
755
- ['ozone', 'no2', 'correlation', 'multi-pollutant', 'interaction']
756
- ])]
757
- for i, q in enumerate(remaining_questions):
758
- if st.button(q, key=f"other_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
759
- selected_prompt = q
760
- st.session_state.last_selected_prompt = q
761
-
762
- st.markdown("---")
763
-
764
-
765
- # Clear Chat Button
766
- if st.button("Clear Chat", use_container_width=True):
767
- st.session_state.responses = []
768
- st.session_state.processing = False
769
- st.session_state.session_id = str(uuid.uuid4())
770
- try:
771
- st.rerun()
772
- except AttributeError:
773
- st.experimental_rerun()
774
 
775
- # Initialize session state first
776
- if "responses" not in st.session_state:
777
- st.session_state.responses = []
778
- if "processing" not in st.session_state:
779
- st.session_state.processing = False
780
- if "session_id" not in st.session_state:
781
- st.session_state.session_id = str(uuid.uuid4())
782
 
 
 
 
 
 
 
 
 
 
783
 
 
 
 
784
 
 
 
 
 
 
 
785
 
786
- def show_custom_response(response):
787
- """Custom response display function with improved styling"""
788
- role = response.get("role", "assistant")
789
- content = response.get("content", "")
790
-
791
- if role == "user":
792
- # User message with right alignment - reduced margins
793
- st.markdown(f"""
794
- <div style='display: flex; justify-content: flex-end; margin: 1rem 0;'>
795
- <div class='user-message'>
796
- {content}
797
- </div>
798
- </div>
799
- """, unsafe_allow_html=True)
800
- elif role == "assistant":
801
- # Check if content is an image filename - don't display the filename text
802
- is_image_path = isinstance(content, str) and any(ext in content for ext in ['.png', '.jpg', '.jpeg'])
803
-
804
- # Check if content is a pandas DataFrame
805
- import pandas as pd
806
- is_dataframe = isinstance(content, pd.DataFrame)
807
-
808
- # Check for errors first and display them with special styling
809
- error = response.get("error")
810
- timestamp = response.get("timestamp", "")
811
- timestamp_display = f" • {timestamp}" if timestamp else ""
812
-
813
- if error:
814
- st.markdown(f"""
815
- <div style='display: flex; justify-content: flex-start; margin: 1rem 0;'>
816
- <div class='assistant-message'>
817
- <div class='assistant-info'>VayuChat{timestamp_display}</div>
818
- <div class='error-message'>
819
- ⚠️ <strong>Error:</strong> {error}
820
- <br><br>
821
- <em>💡 Try rephrasing your question or being more specific about what you'd like to analyze.</em>
822
- </div>
823
- </div>
824
- </div>
825
- """, unsafe_allow_html=True)
826
- # Assistant message with left alignment - reduced margins
827
- elif not is_image_path and not is_dataframe:
828
- st.markdown(f"""
829
- <div style='display: flex; justify-content: flex-start; margin: 1rem 0;'>
830
- <div class='assistant-message'>
831
- <div class='assistant-info'>VayuChat{timestamp_display}</div>
832
- {content if isinstance(content, str) else str(content)}
833
- </div>
834
- </div>
835
- """, unsafe_allow_html=True)
836
- elif is_dataframe:
837
- # Display DataFrame with nice formatting
838
- st.markdown(f"""
839
- <div style='display: flex; justify-content: flex-start; margin: 1rem 0;'>
840
- <div class='assistant-message'>
841
- <div class='assistant-info'>VayuChat{timestamp_display}</div>
842
- Here are the results:
843
- </div>
844
- </div>
845
- """, unsafe_allow_html=True)
846
-
847
- # Add context info for dataframes
848
- st.markdown("""
849
- <div class='context-info'>
850
- 💡 This table is interactive - click column headers to sort, or scroll to view all data.
851
- </div>
852
- """, unsafe_allow_html=True)
853
-
854
- st.dataframe(content, use_container_width=True)
855
-
856
- # Show generated code with Streamlit expander
857
- if response.get("gen_code"):
858
- with st.expander("📋 View Generated Code", expanded=False):
859
- st.code(response["gen_code"], language="python")
860
-
861
- # Try to display image if content is a file path
862
- try:
863
- if isinstance(content, str) and (content.endswith('.png') or content.endswith('.jpg')):
864
- if os.path.exists(content):
865
- # Display image without showing filename
866
- st.image(content, width=800)
867
- return {"is_image": True}
868
- # Also handle case where content shows filename but we want to show image
869
- elif isinstance(content, str) and any(ext in content for ext in ['.png', '.jpg']):
870
- # Extract potential filename from content
871
- import re
872
- filename_match = re.search(r'([^/\\]+\.(?:png|jpg|jpeg))', content)
873
- if filename_match:
874
- filename = filename_match.group(1)
875
- if os.path.exists(filename):
876
- st.image(filename, width=800)
877
- return {"is_image": True}
878
- except:
879
- pass
880
-
881
- return {"is_image": False}
882
 
 
 
 
 
 
 
883
 
884
- # Chat history
885
- # Display chat history
886
- for response_id, response in enumerate(st.session_state.responses):
887
- status = show_custom_response(response)
888
-
889
- # Show feedback section for assistant responses
890
- if response["role"] == "assistant":
891
- feedback_key = f"feedback_{int(response_id/2)}"
892
- error = response.get("error", "")
893
- output = response.get("content", "")
894
- last_prompt = response.get("last_prompt", "")
895
- code = response.get("gen_code", "")
896
 
 
 
 
 
 
 
 
 
 
 
 
897
 
898
- # Beautiful action bar with feedback and retry
899
- st.markdown('<div style="margin: 1.5rem 0 0.5rem 0;"></div>', unsafe_allow_html=True) # Spacer
900
-
901
- if "feedback" in st.session_state.responses[response_id]:
902
- # Show submitted feedback nicely
903
- feedback_data = st.session_state.responses[response_id]["feedback"]
904
- col1, col2 = st.columns([3, 1])
905
- with col1:
906
- st.markdown(f"""
907
- <div style='
908
- background: linear-gradient(135deg, #ecfdf5 0%, #d1fae5 100%);
909
- border: 1px solid #a7f3d0;
910
- border-radius: 8px;
911
- padding: 0.75rem 1rem;
912
- display: flex;
913
- align-items: center;
914
- gap: 8px;
915
- '>
916
- <span style='font-size: 1.1rem;'>{feedback_data.get('score', '')}</span>
917
- <span style='color: #059669; font-weight: 500; font-size: 0.9rem;'>
918
- Thanks for your feedback!
919
- </span>
920
- </div>
921
- """, unsafe_allow_html=True)
922
- with col2:
923
- if st.button("🔄 Retry", key=f"retry_{response_id}", use_container_width=True):
924
- user_prompt = ""
925
- if response_id > 0:
926
- user_prompt = st.session_state.responses[response_id-1].get("content", "")
927
- if user_prompt:
928
- if response_id > 0:
929
- retry_prompt = st.session_state.responses[response_id-1].get("content", "")
930
- del st.session_state.responses[response_id]
931
- del st.session_state.responses[response_id-1]
932
- st.session_state.follow_up_prompt = retry_prompt
933
- st.rerun()
934
- else:
935
- # Clean feedback and retry layout
936
- col1, col2, col3, col4 = st.columns([2, 2, 1, 1])
937
-
938
- with col1:
939
- if st.button("✨ Excellent", key=f"{feedback_key}_excellent", use_container_width=True):
940
- feedback = {"score": "✨ Excellent", "text": ""}
941
- st.session_state.responses[response_id]["feedback"] = feedback
942
- st.rerun()
943
-
944
- with col2:
945
- if st.button("🔧 Needs work", key=f"{feedback_key}_poor", use_container_width=True):
946
- feedback = {"score": "🔧 Needs work", "text": ""}
947
- st.session_state.responses[response_id]["feedback"] = feedback
948
- st.rerun()
949
-
950
- with col4:
951
- if st.button("🔄 Retry", key=f"retry_{response_id}", use_container_width=True):
952
- user_prompt = ""
953
- if response_id > 0:
954
- user_prompt = st.session_state.responses[response_id-1].get("content", "")
955
- if user_prompt:
956
- if response_id > 0:
957
- retry_prompt = st.session_state.responses[response_id-1].get("content", "")
958
- del st.session_state.responses[response_id]
959
- del st.session_state.responses[response_id-1]
960
- st.session_state.follow_up_prompt = retry_prompt
961
- st.rerun()
962
 
963
- # Chat input with better guidance
964
- prompt = st.chat_input("💬 Ask about air quality trends, pollution analysis, or city comparisons...", key="main_chat")
 
 
 
 
 
 
965
 
966
- # Handle selected prompt from quick prompts
967
- if selected_prompt:
968
- prompt = selected_prompt
 
969
 
970
- # Handle follow-up prompts from quick action buttons
971
- if st.session_state.get("follow_up_prompt") and not st.session_state.get("processing"):
972
- prompt = st.session_state.follow_up_prompt
973
- st.session_state.follow_up_prompt = None # Clear the follow-up prompt
 
974
 
975
- # Handle new queries
976
- if prompt and not st.session_state.get("processing"):
977
- # Prevent duplicate processing
978
- if "last_prompt" in st.session_state:
979
- last_prompt = st.session_state["last_prompt"]
980
- last_model_name = st.session_state.get("last_model_name", "")
981
- if (prompt == last_prompt) and (model_name == last_model_name):
982
- prompt = None
 
 
983
 
984
- if prompt:
985
- # Add user input to chat history
986
- user_response = get_from_user(prompt)
987
- st.session_state.responses.append(user_response)
988
-
989
- # Set processing state
990
- st.session_state.processing = True
991
- st.session_state.current_model = model_name
992
- st.session_state.current_question = prompt
993
-
994
- # Rerun to show processing indicator
995
- st.rerun()
996
 
997
- # Process the question if we're in processing state
998
- if st.session_state.get("processing"):
999
- # Enhanced processing indicator like Claude Code
1000
- st.markdown("""
1001
- <div style='padding: 1rem; text-align: center; background: #f8fafc; border-radius: 8px; margin: 1rem 0;'>
1002
- <div style='display: flex; align-items: center; justify-content: center; gap: 0.5rem; color: #475569;'>
1003
- <div style='font-weight: 500;'>🤖 Processing with """ + str(st.session_state.get('current_model', 'Unknown')) + """</div>
1004
- <div class='dots' style='display: inline-flex; gap: 2px;'>
1005
- <div class='dot' style='width: 4px; height: 4px; background: #3b82f6; border-radius: 50%; animation: bounce 1.4s infinite ease-in-out;'></div>
1006
- <div class='dot' style='width: 4px; height: 4px; background: #3b82f6; border-radius: 50%; animation: bounce 1.4s infinite ease-in-out; animation-delay: 0.16s;'></div>
1007
- <div class='dot' style='width: 4px; height: 4px; background: #3b82f6; border-radius: 50%; animation: bounce 1.4s infinite ease-in-out; animation-delay: 0.32s;'></div>
1008
- </div>
1009
- </div>
1010
- <div style='font-size: 0.75rem; color: #6b7280; margin-top: 0.25rem;'>Analyzing data and generating response...</div>
1011
- </div>
1012
- <style>
1013
- @keyframes bounce {
1014
- 0%, 80%, 100% { transform: scale(0.8); opacity: 0.5; }
1015
- 40% { transform: scale(1.2); opacity: 1; }
1016
- }
1017
- </style>
1018
- """, unsafe_allow_html=True)
1019
-
1020
- prompt = st.session_state.get("current_question")
1021
- model_name = st.session_state.get("current_model")
1022
-
1023
- try:
1024
- response = ask_question(model_name=model_name, question=prompt)
1025
-
1026
- if not isinstance(response, dict):
1027
- response = {
1028
- "role": "assistant",
1029
- "content": "Error: Invalid response format",
1030
- "gen_code": "",
1031
- "ex_code": "",
1032
- "last_prompt": prompt,
1033
- "error": "Invalid response format",
1034
- "timestamp": datetime.now().strftime("%H:%M")
1035
- }
1036
-
1037
- response.setdefault("role", "assistant")
1038
- response.setdefault("content", "No content generated")
1039
- response.setdefault("gen_code", "")
1040
- response.setdefault("ex_code", "")
1041
- response.setdefault("last_prompt", prompt)
1042
- response.setdefault("error", None)
1043
- response.setdefault("timestamp", datetime.now().strftime("%H:%M"))
1044
-
1045
- except Exception as e:
1046
- response = {
1047
- "role": "assistant",
1048
- "content": f"Sorry, I encountered an error: {str(e)}",
1049
- "gen_code": "",
1050
- "ex_code": "",
1051
- "last_prompt": prompt,
1052
- "error": str(e),
1053
- "timestamp": datetime.now().strftime("%H:%M")
1054
- }
1055
 
1056
- st.session_state.responses.append(response)
1057
- st.session_state["last_prompt"] = prompt
1058
- st.session_state["last_model_name"] = model_name
1059
- st.session_state.processing = False
1060
-
1061
- # Clear processing state
1062
- if "current_model" in st.session_state:
1063
- del st.session_state.current_model
1064
- if "current_question" in st.session_state:
1065
- del st.session_state.current_question
1066
-
1067
- st.rerun()
1068
 
1069
- # Close chat container
1070
- st.markdown("</div>", unsafe_allow_html=True)
 
 
 
 
 
 
1071
 
1072
- # Minimal auto-scroll - only scroll when processing
1073
- if st.session_state.get("processing"):
1074
- st.markdown("<script>scrollToBottom();</script>", unsafe_allow_html=True)
 
1075
 
1076
- # Dataset Info Section (matching mockup)
1077
- st.markdown("### Dataset Info")
1078
- st.markdown("""
1079
- <div style='background: #f1f5f9; border-radius: 8px; padding: 1rem; margin-bottom: 1rem;'>
1080
- <h4 style='margin: 0 0 0.5rem 0; color: #1e293b; font-size: 0.9rem;'>PM2.5 Air Quality Data</h4>
1081
- <p style='margin: 0; font-size: 0.75rem; color: #475569;'><strong>Time Range:</strong> 2022 - 2023</p>
1082
- <p style='margin: 0; font-size: 0.75rem; color: #475569;'><strong>Locations:</strong> 300+ cities across India</p>
1083
- <p style='margin: 0; font-size: 0.75rem; color: #475569;'><strong>Records:</strong> 100,000+ measurements</p>
1084
- </div>
1085
- """, unsafe_allow_html=True)
 
36
  initial_sidebar_state="expanded"
37
  )
38
 
39
+ # JavaScript for interactions
40
+ # st.markdown("""
41
+ # <script>
42
+ # function scrollToBottom() {
43
+ # setTimeout(function() {
44
+ # const mainContainer = document.querySelector('.main-container');
45
+ # if (mainContainer) {
46
+ # mainContainer.scrollTop = mainContainer.scrollHeight;
47
+ # }
48
+ # window.scrollTo(0, document.body.scrollHeight);
49
+ # }, 100);
50
+ # }
51
 
52
+ # function toggleCode(header) {
53
+ # const codeBlock = header.nextElementSibling;
54
+ # const toggleText = header.querySelector('.toggle-text');
55
+
56
+ # if (codeBlock.style.display === 'none') {
57
+ # codeBlock.style.display = 'block';
58
+ # toggleText.textContent = 'Click to collapse';
59
+ # } else {
60
+ # codeBlock.style.display = 'none';
61
+ # toggleText.textContent = 'Click to expand';
62
+ # }
63
+ # }
64
+ # </script>
65
+ # """, unsafe_allow_html=True)
66
 
67
+ # FORCE reload environment variables
68
+ load_dotenv(override=True)
 
 
69
 
70
+ # Get API keys
71
+ Groq_Token = os.getenv("GROQ_API_KEY")
72
+ hf_token = os.getenv("HF_TOKEN")
73
+ gemini_token = os.getenv("GEMINI_TOKEN")
74
 
75
+ # Model order is decided by this
76
+ models = {
77
+ "gpt-oss-120b": "openai/gpt-oss-120b",
78
+ "qwen3-32b": "qwen/qwen3-32b",
79
+ "gpt-oss-20b": "openai/gpt-oss-20b",
80
+ "llama4 maverik":"meta-llama/llama-4-maverick-17b-128e-instruct",
81
+ "llama3.3": "llama-3.3-70b-versatile",
82
+ "deepseek-R1": "deepseek-r1-distill-llama-70b",
83
+ "gemini-2.5-flash": "gemini-2.5-flash",
84
+ "gemini-2.5-pro": "gemini-2.5-pro",
85
+ "gemini-2.5-flash-lite": "gemini-2.5-flash-lite",
86
+ "gemini-2.0-flash": "gemini-2.0-flash",
87
+ "gemini-2.0-flash-lite": "gemini-2.0-flash-lite",
88
+ # "llama4 scout":"meta-llama/llama-4-scout-17b-16e-instruct"
89
+ # "llama3.1": "llama-3.1-8b-instant"
90
  }
91
 
92
+ self_path = os.path.dirname(os.path.abspath(__file__))
 
 
 
 
 
93
 
94
+ # Initialize session ID for this session
95
+ if "session_id" not in st.session_state:
96
+ st.session_state.session_id = str(uuid.uuid4())
97
 
98
+ def upload_feedback(feedback, error, output, last_prompt, code, status):
99
+ """Enhanced feedback upload function with better logging and error handling"""
100
+ try:
101
+ if not hf_token or hf_token.strip() == "":
102
+ st.warning("Cannot upload feedback - HF_TOKEN not available")
103
+ return False
104
 
105
+ # Create comprehensive feedback data
106
+ feedback_data = {
107
+ "timestamp": datetime.now().isoformat(),
108
+ "session_id": st.session_state.session_id,
109
+ "feedback_score": feedback.get("score", ""),
110
+ "feedback_comment": feedback.get("text", ""),
111
+ "user_prompt": last_prompt,
112
+ "ai_output": str(output),
113
+ "generated_code": code or "",
114
+ "error_message": error or "",
115
+ "is_image_output": status.get("is_image", False),
116
+ "success": not bool(error)
117
+ }
118
 
119
+ # Create unique folder name with timestamp
120
+ timestamp_str = datetime.now().strftime("%Y%m%d_%H%M%S")
121
+ random_id = str(uuid.uuid4())[:8]
122
+ folder_name = f"feedback_{timestamp_str}_{random_id}"
123
+
124
+ # Create markdown feedback file
125
+ markdown_content = f"""# VayuChat Feedback Report
126
 
127
+ ## Session Information
128
+ - **Timestamp**: {feedback_data['timestamp']}
129
+ - **Session ID**: {feedback_data['session_id']}
 
 
 
 
 
130
 
131
+ ## User Interaction
132
+ **Prompt**: {feedback_data['user_prompt']}
 
 
 
 
 
133
 
134
+ ## AI Response
135
+ **Output**: {feedback_data['ai_output']}
 
 
 
 
 
 
 
 
136
 
137
+ ## Generated Code
138
+ ```python
139
+ {feedback_data['generated_code']}
140
+ ```
 
 
 
 
 
 
 
141
 
142
+ ## Technical Details
143
+ - **Error Message**: {feedback_data['error_message']}
144
+ - **Is Image Output**: {feedback_data['is_image_output']}
145
+ - **Success**: {feedback_data['success']}
 
 
 
 
 
 
 
146
 
147
+ ## User Feedback
148
+ - **Score**: {feedback_data['feedback_score']}
149
+ - **Comments**: {feedback_data['feedback_comment']}
150
+ """
151
 
152
+ # Save markdown file locally
153
+ markdown_filename = f"{folder_name}.md"
154
+ markdown_local_path = f"/tmp/{markdown_filename}"
155
+
156
+ with open(markdown_local_path, "w", encoding="utf-8") as f:
157
+ f.write(markdown_content)
 
 
158
 
159
+ # Upload to Hugging Face
160
+ api = HfApi(token=hf_token)
161
+
162
+ # Upload markdown feedback
163
+ api.upload_file(
164
+ path_or_fileobj=markdown_local_path,
165
+ path_in_repo=f"data/{markdown_filename}",
166
+ repo_id="SustainabilityLabIITGN/VayuChat_Feedback",
167
+ repo_type="dataset",
168
+ )
169
+
170
+ # Upload image if it exists and is an image output
171
+ if status.get("is_image", False) and isinstance(output, str) and os.path.exists(output):
172
+ try:
173
+ image_filename = f"{folder_name}_plot.png"
174
+ api.upload_file(
175
+ path_or_fileobj=output,
176
+ path_in_repo=f"data/{image_filename}",
177
+ repo_id="SustainabilityLabIITGN/VayuChat_Feedback",
178
+ repo_type="dataset",
179
+ )
180
+ except Exception as img_error:
181
+ print(f"Error uploading image: {img_error}")
182
+
183
+ # Clean up local files
184
+ if os.path.exists(markdown_local_path):
185
+ os.remove(markdown_local_path)
186
+
187
+ st.success("Feedback uploaded successfully!")
188
+ return True
189
+
190
+ except Exception as e:
191
+ st.error(f"Error uploading feedback: {e}")
192
+ print(f"Feedback upload error: {e}")
193
+ return False
194
 
195
+ # Filter available models
196
+ available_models = []
197
+ model_names = list(models.keys())
198
+ groq_models = []
199
+ gemini_models = []
200
+ for model_name in model_names:
201
+ if "gemini" not in model_name:
202
+ groq_models.append(model_name)
203
+ else:
204
+ gemini_models.append(model_name)
205
+ if Groq_Token and Groq_Token.strip():
206
+ available_models.extend(groq_models)
207
+ if gemini_token and gemini_token.strip():
208
+ available_models.extend(gemini_models)
209
 
210
+ if not available_models:
211
+ st.error("No API keys available! Please set up your API keys in the .env file")
212
+ st.stop()
 
 
213
 
214
+ # Set GPT-OSS-120B as default if available
215
+ default_index = 0
216
+ if "gpt-oss-120b" in available_models:
217
+ default_index = available_models.index("gpt-oss-120b")
218
+ elif "deepseek-R1" in available_models:
219
+ default_index = available_models.index("deepseek-R1")
 
 
 
 
 
 
 
220
 
221
+ # Compact header - everything perfectly aligned at same height
222
+ st.markdown("""
223
+ <style>
224
+ .header-container {
225
+ display: flex;
226
+ align-items: center;
227
+ justify-content: center;
228
+ gap: 12px;
229
+ border-bottom: 1px solid #e5e7eb;
230
  }
231
 
232
+ .header-container img {
233
+ height: 80px;
 
 
 
 
 
234
  }
235
 
236
+ .header-container h1 {
237
+ padding: 0.25rem 0;
238
+ margin: 0;
239
+ font-size: 1.5rem;
240
+ font-weight: 700;
241
+ color: #2563eb;
 
242
  }
243
 
244
+ /* 🔹 Responsive: On small screens stack vertically */
245
+ @media (max-width: 768px) {
246
+ .header-container {
247
+ flex-direction: column;
248
+ text-align: center;
249
+ gap: 0;
250
+ padding: 0 0 0.40rem;
251
+ }
252
+ .header-container img {
253
+ height: 60px;
254
+ }
255
+ .header-container h1 {
256
+ padding: 0 0;
257
+ font-size: 1.25rem;
258
+ }
259
  }
260
+ </style>
261
+ <div class="header-container">
262
+ <img src="https://sustainability-lab.github.io/images/logo_light.svg" />
263
+ <div style="display: flex; flex-direction: column; line-height: 1.2;">
264
+ <h1>VayuChat</h1>
265
+ <span>AI Air Quality Analysis • Sustainability Lab, IIT Gandhinagar</span>
266
+ </div>
267
+ </div>
268
+ """, unsafe_allow_html=True)
269
 
270
+ # Load data with caching for better performance
271
+ @st.cache_data
272
+ def load_data():
273
+ return preprocess_and_load_df(join(self_path, "Data.csv"))
 
 
 
 
 
 
 
 
 
274
 
275
+ try:
276
+ df = load_data()
277
+ # Data loaded silently - no success message needed
278
+ except Exception as e:
279
+ st.error(f"Error loading data: {e}")
280
+ st.stop()
281
 
282
+ inference_server = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
283
+ image_path = "IITGN_Logo.png"
 
 
 
284
 
285
+ # Clean sidebar
286
+ with st.sidebar:
287
+ # Model selector at top of sidebar for easy access
288
+ model_name = st.selectbox(
289
+ "🤖 AI Model:",
290
+ available_models,
291
+ index=default_index,
292
+ help="Choose your AI model - easily accessible without scrolling!"
293
+ )
294
+
295
+ st.markdown("---")
296
+
297
+ # Quick Queries Section
298
+ st.markdown("### 💭 Quick Queries")
299
+
300
+ # Load quick prompts with caching
301
+ @st.cache_data
302
+ def load_questions():
303
+ questions = []
304
+ questions_file = join(self_path, "questions.txt")
305
+ if os.path.exists(questions_file):
306
+ try:
307
+ with open(questions_file, 'r', encoding='utf-8') as f:
308
+ content = f.read()
309
+ questions = [q.strip() for q in content.split("\n") if q.strip()]
310
+ except Exception as e:
311
+ questions = []
312
+ return questions
313
+
314
+ questions = load_questions()
315
+
316
+ # Add default prompts if file doesn't exist or is empty
317
+ if not questions:
318
+ questions = [
319
+ "Which month had highest pollution?",
320
+ "Which city has worst air quality?",
321
+ "Show annual PM2.5 average",
322
+ "Plot monthly average PM2.5 for 2023",
323
+ "List all cities by pollution level",
324
+ "Compare winter vs summer pollution",
325
+ "Show seasonal pollution patterns",
326
+ "Which areas exceed WHO guidelines?",
327
+ "What are peak pollution hours?",
328
+ "Show PM10 vs PM2.5 comparison",
329
+ "Which station records highest variability in PM2.5?",
330
+ "Calculate pollution improvement rate year-over-year by city",
331
+ "Identify cities with PM2.5 levels consistently above 50 μg/m³ for >6 months",
332
+ "Find correlation between PM2.5 and PM10 across different seasons and cities",
333
+ "Compare weekday vs weekend levels",
334
+ "Plot yearly trend analysis",
335
+ "Show pollution distribution by city",
336
+ "Create correlation plot between pollutants"
337
+ ]
338
+
339
+ # Quick query buttons in sidebar
340
+ selected_prompt = None
341
+
342
+
343
+ # Show all questions but in a scrollable format
344
+ if len(questions) > 0:
345
+ st.markdown("**Select a question to analyze:**")
346
+
347
+ # Getting Started section with simple questions
348
+ getting_started_questions = questions[:10] # First 10 simple questions
349
+ with st.expander("🚀 Getting Started - Simple Questions", expanded=True):
350
+ for i, q in enumerate(getting_started_questions):
351
+ if st.button(q, key=f"start_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
352
+ selected_prompt = q
353
+ st.session_state.last_selected_prompt = q
354
+
355
+ # Create expandable sections for better organization
356
+ with st.expander("📊 NCAP Funding & Policy Analysis", expanded=False):
357
+ for i, q in enumerate([q for q in questions if any(word in q.lower() for word in ['ncap', 'funding', 'investment', 'rupee'])]):
358
+ if st.button(q, key=f"ncap_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
359
+ selected_prompt = q
360
+ st.session_state.last_selected_prompt = q
361
+
362
+ with st.expander("🌬️ Meteorology & Environmental Factors", expanded=False):
363
+ for i, q in enumerate([q for q in questions if any(word in q.lower() for word in ['wind', 'temperature', 'humidity', 'rainfall', 'meteorological', 'monsoon', 'barometric'])]):
364
+ if st.button(q, key=f"met_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
365
+ selected_prompt = q
366
+ st.session_state.last_selected_prompt = q
367
+
368
+ with st.expander("👥 Population & Demographics", expanded=False):
369
+ for i, q in enumerate([q for q in questions if any(word in q.lower() for word in ['population', 'capita', 'density', 'exposure'])]):
370
+ if st.button(q, key=f"pop_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
371
+ selected_prompt = q
372
+ st.session_state.last_selected_prompt = q
373
+
374
+ with st.expander("🏭 Multi-Pollutant Analysis", expanded=False):
375
+ for i, q in enumerate([q for q in questions if any(word in q.lower() for word in ['ozone', 'no2', 'correlation', 'multi-pollutant', 'interaction'])]):
376
+ if st.button(q, key=f"multi_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
377
+ selected_prompt = q
378
+ st.session_state.last_selected_prompt = q
379
+
380
+ with st.expander("📈 Other Analysis Questions", expanded=False):
381
+ remaining_questions = [q for q in questions if not any(any(word in q.lower() for word in category) for category in [
382
+ ['ncap', 'funding', 'investment', 'rupee'],
383
+ ['wind', 'temperature', 'humidity', 'rainfall', 'meteorological', 'monsoon', 'barometric'],
384
+ ['population', 'capita', 'density', 'exposure'],
385
+ ['ozone', 'no2', 'correlation', 'multi-pollutant', 'interaction']
386
+ ])]
387
+ for i, q in enumerate(remaining_questions):
388
+ if st.button(q, key=f"other_q_{i}", use_container_width=True, help=f"Analyze: {q}"):
389
+ selected_prompt = q
390
+ st.session_state.last_selected_prompt = q
391
+
392
+ st.markdown("---")
393
+
394
+
395
+ # Clear Chat Button
396
+ if st.button("Clear Chat", use_container_width=True):
397
+ st.session_state.responses = []
398
+ st.session_state.processing = False
399
+ st.session_state.session_id = str(uuid.uuid4())
400
+ try:
401
+ st.rerun()
402
+ except AttributeError:
403
+ st.experimental_rerun()
404
 
405
+ # Initialize session state first
406
+ if "responses" not in st.session_state:
407
+ st.session_state.responses = []
408
+ if "processing" not in st.session_state:
409
+ st.session_state.processing = False
410
+ if "session_id" not in st.session_state:
411
+ st.session_state.session_id = str(uuid.uuid4())
412
 
 
 
 
413
 
 
 
 
 
 
 
 
 
 
 
414
 
 
 
 
 
415
 
416
+ def show_custom_response(response):
417
+ """Custom response display function with improved styling"""
418
+ role = response.get("role", "assistant")
419
+ content = response.get("content", "")
420
+
421
+ if role == "user":
422
+ # User message with right alignment - reduced margins
423
+ st.markdown(f"""
424
+ <div style='display: flex; justify-content: flex-end; margin: 1rem 0;'>
425
+ <div class='user-message'>
426
+ {content}
427
+ </div>
428
+ </div>
429
+ """, unsafe_allow_html=True)
430
+ elif role == "assistant":
431
+ # Check if content is an image filename - don't display the filename text
432
+ is_image_path = isinstance(content, str) and any(ext in content for ext in ['.png', '.jpg', '.jpeg'])
433
+
434
+ # Check if content is a pandas DataFrame
435
+ import pandas as pd
436
+ is_dataframe = isinstance(content, pd.DataFrame)
437
+
438
+ # Check for errors first and display them with special styling
439
+ error = response.get("error")
440
+ timestamp = response.get("timestamp", "")
441
+ timestamp_display = f" • {timestamp}" if timestamp else ""
442
+
443
+ if error:
444
+ st.markdown(f"""
445
+ <div style='display: flex; justify-content: flex-start; margin: 1rem 0;'>
446
+ <div class='assistant-message'>
447
+ <div class='assistant-info'>VayuChat{timestamp_display}</div>
448
+ <div class='error-message'>
449
+ ⚠️ <strong>Error:</strong> {error}
450
+ <br><br>
451
+ <em>💡 Try rephrasing your question or being more specific about what you'd like to analyze.</em>
452
+ </div>
453
+ </div>
454
+ </div>
455
+ """, unsafe_allow_html=True)
456
+ # Assistant message with left alignment - reduced margins
457
+ elif not is_image_path and not is_dataframe:
458
+ st.markdown(f"""
459
+ <div style='display: flex; justify-content: flex-start; margin: 1rem 0;'>
460
+ <div class='assistant-message'>
461
+ <div class='assistant-info'>VayuChat{timestamp_display}</div>
462
+ {content if isinstance(content, str) else str(content)}
463
+ </div>
464
+ </div>
465
+ """, unsafe_allow_html=True)
466
+ elif is_dataframe:
467
+ # Display DataFrame with nice formatting
468
+ st.markdown(f"""
469
+ <div style='display: flex; justify-content: flex-start; margin: 1rem 0;'>
470
+ <div class='assistant-message'>
471
+ <div class='assistant-info'>VayuChat{timestamp_display}</div>
472
+ Here are the results:
473
+ </div>
474
+ </div>
475
+ """, unsafe_allow_html=True)
476
+
477
+ # Add context info for dataframes
478
+ st.markdown("""
479
+ <div class='context-info'>
480
+ 💡 This table is interactive - click column headers to sort, or scroll to view all data.
481
+ </div>
482
+ """, unsafe_allow_html=True)
483
+
484
+ st.dataframe(content, use_container_width=True)
485
+
486
+ # Show generated code with Streamlit expander
487
+ if response.get("gen_code"):
488
+ with st.expander("📋 View Generated Code", expanded=False):
489
+ st.code(response["gen_code"], language="python")
490
+
491
+ # Try to display image if content is a file path
492
+ try:
493
+ if isinstance(content, str) and (content.endswith('.png') or content.endswith('.jpg')):
494
+ if os.path.exists(content):
495
+ # Display image without showing filename
496
+ st.image(content, width=800)
497
+ return {"is_image": True}
498
+ # Also handle case where content shows filename but we want to show image
499
+ elif isinstance(content, str) and any(ext in content for ext in ['.png', '.jpg']):
500
+ # Extract potential filename from content
501
+ import re
502
+ filename_match = re.search(r'([^/\\]+\.(?:png|jpg|jpeg))', content)
503
+ if filename_match:
504
+ filename = filename_match.group(1)
505
+ if os.path.exists(filename):
506
+ st.image(filename, width=800)
507
+ return {"is_image": True}
508
+ except:
509
+ pass
510
+
511
+ return {"is_image": False}
512
 
 
 
 
513
 
514
+ # Chat history
515
+ # Display chat history
516
+ for response_id, response in enumerate(st.session_state.responses):
517
+ status = show_custom_response(response)
518
+
519
+ # Show feedback section for assistant responses
520
+ if response["role"] == "assistant":
521
+ feedback_key = f"feedback_{int(response_id/2)}"
522
+ error = response.get("error", "")
523
+ output = response.get("content", "")
524
+ last_prompt = response.get("last_prompt", "")
525
+ code = response.get("gen_code", "")
526
 
 
 
 
 
 
 
 
 
527
 
528
+ # Beautiful action bar with feedback and retry
529
+ st.markdown('<div style="margin: 1.5rem 0 0.5rem 0;"></div>', unsafe_allow_html=True) # Spacer
530
+
531
+ if "feedback" in st.session_state.responses[response_id]:
532
+ # Show submitted feedback nicely
533
+ feedback_data = st.session_state.responses[response_id]["feedback"]
534
+ col1, col2 = st.columns([3, 1])
535
+ with col1:
536
+ st.markdown(f"""
537
+ <div style='
538
+ background: linear-gradient(135deg, #ecfdf5 0%, #d1fae5 100%);
539
+ border: 1px solid #a7f3d0;
540
+ border-radius: 8px;
541
+ padding: 0.75rem 1rem;
542
+ display: flex;
543
+ align-items: center;
544
+ gap: 8px;
545
+ '>
546
+ <span style='font-size: 1.1rem;'>{feedback_data.get('score', '')}</span>
547
+ <span style='color: #059669; font-weight: 500; font-size: 0.9rem;'>
548
+ Thanks for your feedback!
549
+ </span>
550
+ </div>
551
+ """, unsafe_allow_html=True)
552
+ with col2:
553
+ if st.button("🔄 Retry", key=f"retry_{response_id}", use_container_width=True):
554
+ user_prompt = ""
555
+ if response_id > 0:
556
+ user_prompt = st.session_state.responses[response_id-1].get("content", "")
557
+ if user_prompt:
558
+ if response_id > 0:
559
+ retry_prompt = st.session_state.responses[response_id-1].get("content", "")
560
+ del st.session_state.responses[response_id]
561
+ del st.session_state.responses[response_id-1]
562
+ st.session_state.follow_up_prompt = retry_prompt
563
+ st.rerun()
564
+ else:
565
+ # Clean feedback and retry layout
566
+ col1, col2, col3, col4 = st.columns([2, 2, 1, 1])
567
+
568
+ with col1:
569
+ if st.button("✨ Excellent", key=f"{feedback_key}_excellent", use_container_width=True):
570
+ feedback = {"score": "✨ Excellent", "text": ""}
571
+ st.session_state.responses[response_id]["feedback"] = feedback
572
+ st.rerun()
573
+
574
+ with col2:
575
+ if st.button("🔧 Needs work", key=f"{feedback_key}_poor", use_container_width=True):
576
+ feedback = {"score": "🔧 Needs work", "text": ""}
577
+ st.session_state.responses[response_id]["feedback"] = feedback
578
+ st.rerun()
579
+
580
+ with col4:
581
+ if st.button("🔄 Retry", key=f"retry_{response_id}", use_container_width=True):
582
+ user_prompt = ""
583
+ if response_id > 0:
584
+ user_prompt = st.session_state.responses[response_id-1].get("content", "")
585
+ if user_prompt:
586
+ if response_id > 0:
587
+ retry_prompt = st.session_state.responses[response_id-1].get("content", "")
588
+ del st.session_state.responses[response_id]
589
+ del st.session_state.responses[response_id-1]
590
+ st.session_state.follow_up_prompt = retry_prompt
591
+ st.rerun()
592
 
593
+ # Chat input with better guidance
594
+ prompt = st.chat_input("💬 Ask about air quality trends, pollution analysis, or city comparisons...", key="main_chat")
 
595
 
596
+ # Handle selected prompt from quick prompts
597
+ if selected_prompt:
598
+ prompt = selected_prompt
 
 
 
 
 
599
 
600
+ # Handle follow-up prompts from quick action buttons
601
+ if st.session_state.get("follow_up_prompt") and not st.session_state.get("processing"):
602
+ prompt = st.session_state.follow_up_prompt
603
+ st.session_state.follow_up_prompt = None # Clear the follow-up prompt
604
 
605
+ # Handle new queries
606
+ if prompt and not st.session_state.get("processing"):
607
+ # Prevent duplicate processing
608
+ if "last_prompt" in st.session_state:
609
+ last_prompt = st.session_state["last_prompt"]
610
+ last_model_name = st.session_state.get("last_model_name", "")
611
+ if (prompt == last_prompt) and (model_name == last_model_name):
612
+ prompt = None
613
 
614
+ if prompt:
615
+ # Add user input to chat history
616
+ user_response = get_from_user(prompt)
617
+ st.session_state.responses.append(user_response)
618
+
619
+ # Set processing state
620
+ st.session_state.processing = True
621
+ st.session_state.current_model = model_name
622
+ st.session_state.current_question = prompt
623
+
624
+ # Rerun to show processing indicator
625
+ st.rerun()
626
 
627
+ # Process the question if we're in processing state
628
+ if st.session_state.get("processing"):
629
+ # Enhanced processing indicator like Claude Code
630
+ st.markdown("""
631
+ <div style='padding: 1rem; text-align: center; background: #f8fafc; border-radius: 8px; margin: 1rem 0;'>
632
+ <div style='display: flex; align-items: center; justify-content: center; gap: 0.5rem; color: #475569;'>
633
+ <div style='font-weight: 500;'>🤖 Processing with """ + str(st.session_state.get('current_model', 'Unknown')) + """</div>
634
+ <div class='dots' style='display: inline-flex; gap: 2px;'>
635
+ <div class='dot' style='width: 4px; height: 4px; background: #3b82f6; border-radius: 50%; animation: bounce 1.4s infinite ease-in-out;'></div>
636
+ <div class='dot' style='width: 4px; height: 4px; background: #3b82f6; border-radius: 50%; animation: bounce 1.4s infinite ease-in-out; animation-delay: 0.16s;'></div>
637
+ <div class='dot' style='width: 4px; height: 4px; background: #3b82f6; border-radius: 50%; animation: bounce 1.4s infinite ease-in-out; animation-delay: 0.32s;'></div>
638
+ </div>
639
+ </div>
640
+ <div style='font-size: 0.75rem; color: #6b7280; margin-top: 0.25rem;'>Analyzing data and generating response...</div>
641
+ </div>
642
+ <style>
643
+ @keyframes bounce {
644
+ 0%, 80%, 100% { transform: scale(0.8); opacity: 0.5; }
645
+ 40% { transform: scale(1.2); opacity: 1; }
646
+ }
647
+ </style>
648
+ """, unsafe_allow_html=True)
649
+
650
+ prompt = st.session_state.get("current_question")
651
+ model_name = st.session_state.get("current_model")
652
+
653
+ try:
654
+ response = ask_question(model_name=model_name, question=prompt)
655
+
656
+ if not isinstance(response, dict):
657
+ response = {
658
+ "role": "assistant",
659
+ "content": "Error: Invalid response format",
660
+ "gen_code": "",
661
+ "ex_code": "",
662
+ "last_prompt": prompt,
663
+ "error": "Invalid response format",
664
+ "timestamp": datetime.now().strftime("%H:%M")
665
+ }
666
+
667
+ response.setdefault("role", "assistant")
668
+ response.setdefault("content", "No content generated")
669
+ response.setdefault("gen_code", "")
670
+ response.setdefault("ex_code", "")
671
+ response.setdefault("last_prompt", prompt)
672
+ response.setdefault("error", None)
673
+ response.setdefault("timestamp", datetime.now().strftime("%H:%M"))
674
+
675
+ except Exception as e:
676
+ response = {
677
+ "role": "assistant",
678
+ "content": f"Sorry, I encountered an error: {str(e)}",
679
+ "gen_code": "",
680
+ "ex_code": "",
681
+ "last_prompt": prompt,
682
+ "error": str(e),
683
+ "timestamp": datetime.now().strftime("%H:%M")
684
+ }
685
+
686
+ st.session_state.responses.append(response)
687
+ st.session_state["last_prompt"] = prompt
688
+ st.session_state["last_model_name"] = model_name
689
+ st.session_state.processing = False
690
+
691
+ # Clear processing state
692
+ if "current_model" in st.session_state:
693
+ del st.session_state.current_model
694
+ if "current_question" in st.session_state:
695
+ del st.session_state.current_question
696
+
697
+ st.rerun()
698
+
699
+ # Close chat container
700
+ st.markdown("</div>", unsafe_allow_html=True)
701
+
702
+ # Minimal auto-scroll - only scroll when processing
703
+ if st.session_state.get("processing"):
704
+ st.markdown("<script>scrollToBottom();</script>", unsafe_allow_html=True)
705
+
706
+ # Dataset Info Section (matching mockup)
707
+ st.markdown("### Dataset Info")
708
+ st.markdown("""
709
+ <div style='background: #f1f5f9; border-radius: 8px; padding: 1rem; margin-bottom: 1rem;'>
710
+ <h4 style='margin: 0 0 0.5rem 0; color: #1e293b; font-size: 0.9rem;'>PM2.5 Air Quality Data</h4>
711
+ <p style='margin: 0; font-size: 0.75rem; color: #475569;'><strong>Time Range:</strong> 2022 - 2023</p>
712
+ <p style='margin: 0; font-size: 0.75rem; color: #475569;'><strong>Locations:</strong> 300+ cities across India</p>
713
+ <p style='margin: 0; font-size: 0.75rem; color: #475569;'><strong>Records:</strong> 100,000+ measurements</p>
714
+ </div>
715
+ """, unsafe_allow_html=True)
716
+
717
+
718
# Streamlit wraps each st.markdown call in its own div, so it is better to
# inject this global CSS last — after all content markdown has been emitted.
# Custom CSS for beautiful styling.
st.markdown("""
<style>
    /* Clean app background */
    .stApp {
        background-color: #ffffff;
        color: #212529;
        font-family: 'Segoe UI', sans-serif;
    }

    /* Reduce main container padding.
       NOTE: padding-bottom here is overridden further down (100px) to make
       room for the fixed chat input — the later rule wins by cascade order. */
    .main .block-container {
        padding-top: 0px;
        padding-bottom: 3rem;
        max-width: 100%;
    }

    /* Remove excessive spacing */
    .element-container {
        margin-bottom: 0.5rem !important;
    }

    /* Fix sidebar spacing */
    [data-testid="stSidebar"] .element-container {
        margin-bottom: 0.25rem !important;
    }

    /* Sidebar */
    [data-testid="stSidebar"] {
        background-color: #f8f9fa;
        border-right: 1px solid #dee2e6;
        padding: 1rem;
    }

    /* Optimize sidebar scrolling */
    [data-testid="stSidebar"] > div:first-child {
        height: 100vh;
        overflow-y: auto;
        padding-bottom: 2rem;
    }

    [data-testid="stSidebar"]::-webkit-scrollbar {
        width: 6px;
    }

    [data-testid="stSidebar"]::-webkit-scrollbar-track {
        background: #f1f1f1;
        border-radius: 3px;
    }

    [data-testid="stSidebar"]::-webkit-scrollbar-thumb {
        background: #c1c1c1;
        border-radius: 3px;
    }

    [data-testid="stSidebar"]::-webkit-scrollbar-thumb:hover {
        background: #a1a1a1;
    }

    /* Main title */
    .main-title {
        text-align: center;
        color: #343a40;
        font-size: 2.5rem;
        font-weight: 700;
        margin-bottom: 0.5rem;
    }

    /* Subtitle */
    .subtitle {
        text-align: center;
        color: #6c757d;
        font-size: 1.1rem;
        margin-bottom: 1.5rem;
    }

    /* Instructions */
    .instructions {
        background-color: #f1f3f5;
        border-left: 4px solid #0d6efd;
        padding: 1rem;
        margin-bottom: 1.5rem;
        border-radius: 6px;
        color: #495057;
        text-align: left;
    }

    /* Quick prompt buttons */
    .quick-prompt-container {
        display: flex;
        flex-wrap: wrap;
        gap: 8px;
        margin-bottom: 1.5rem;
        padding: 1rem;
        background-color: #f8f9fa;
        border-radius: 10px;
        border: 1px solid #dee2e6;
    }

    .quick-prompt-btn {
        background-color: #0d6efd;
        color: white;
        border: none;
        padding: 8px 16px;
        border-radius: 20px;
        font-size: 0.9rem;
        cursor: pointer;
        transition: all 0.2s ease;
        white-space: nowrap;
    }

    .quick-prompt-btn:hover {
        background-color: #0b5ed7;
        transform: translateY(-2px);
    }

    /* User message styling */
    .user-message {
        background: #3b82f6;
        color: white;
        padding: 0.75rem 1rem;
        border-radius: 7px;
        max-width: 95%;
    }

    .user-info {
        font-size: 0.875rem;
        opacity: 0.9;
        margin-bottom: 3px;
    }

    /* Assistant message styling */
    .assistant-message {
        background: #f1f5f9;
        color: #334155;
        padding: 0.75rem 1rem;
        border-radius: 12px;
        max-width: 85%;
    }

    .assistant-info {
        font-size: 0.875rem;
        color: #6b7280;
        margin-bottom: 5px;
    }

    /* Processing indicator */
    .processing-indicator {
        background: linear-gradient(135deg, #a8edea 0%, #fed6e3 100%);
        color: #333;
        padding: 1rem 1.5rem;
        border-radius: 12px;
        margin: 1rem 0;
        margin-left: 0;
        margin-right: auto;
        max-width: 70%;
        box-shadow: 0 2px 10px rgba(0,0,0,0.1);
        animation: pulse 2s infinite;
    }

    @keyframes pulse {
        0% { opacity: 1; }
        50% { opacity: 0.7; }
        100% { opacity: 1; }
    }

    /* Feedback box */
    .feedback-section {
        background-color: #f8f9fa;
        border: 1px solid #dee2e6;
        padding: 1rem;
        border-radius: 8px;
        margin: 1rem 0;
    }

    /* Success and error messages */
    .success-message {
        background-color: #d1e7dd;
        color: #0f5132;
        padding: 1rem;
        border-radius: 6px;
        border: 1px solid #badbcc;
    }

    .error-message {
        background-color: #f8d7da;
        color: #842029;
        padding: 1rem;
        border-radius: 6px;
        border: 1px solid #f5c2c7;
    }

    /* Chat input styling — intentionally disabled, kept for reference.
       FIX: '#' is not a CSS comment marker; the previous '#'-prefixed lines
       were invalid CSS silently dropped by the browser. Wrapped in a real
       CSS block comment instead. */
    /*
    .stChatInput {
        border-radius: 12px !important;
        border: 2px solid #e5e7eb !important;
        background: #ffffff !important;
        padding: 0.75rem 1rem !important;
        font-size: 1rem !important;
        width: 100% !important;
        max-width: 70% !important;
        margin: 0 !important;
        box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1) !important;
        transition: all 0.2s ease !important;
    }

    .stChatInput:focus {
        border-color: #3b82f6 !important;
        box-shadow: 0 0 0 3px rgba(59, 130, 246, 0.1) !important;
        outline: none !important;
    }
    */

    /* Chat input container */
    .stChatInput > div {
        padding: 0 !important;
        margin: 0 !important;
    }

    /* Chat input text area — intentionally disabled, kept for reference. */
    /*
    .stChatInput textarea {
        border: none !important;
        background: transparent !important;
        padding: 0 !important;
        margin: 0 !important;
        font-size: 1rem !important;
        line-height: 1.5 !important;
        resize: none !important;
        outline: none !important;
    }

    .stChatInput textarea::placeholder {
        color: #9ca3af !important;
        font-style: normal !important;
    }
    */

    /* FIX: was 'align-items = center' ('=' is invalid CSS; the declaration
       was silently dropped). NOTE(review): .st-emotion-cache-* class names
       are auto-generated by Streamlit and can change between versions —
       prefer a [data-testid] selector if one exists. */
    .st-emotion-cache-f4ro0r {
        align-items: center;
    }

    /* Fix the main chat input container alignment */
    [data-testid="stChatInput"] {
        position: fixed !important;
        bottom: 0.5rem !important;
        left: 6rem !important;
        right: 0 !important;
        background: #ffffff !important;
        width: 65% !important;
        box-shadow: 0 -2px 10px rgba(0, 0, 0, 0.1) !important;
    }

    /* Adjust main content to account for fixed chat input
       (deliberately overrides the earlier 3rem padding-bottom). */
    .main .block-container {
        padding-bottom: 100px !important;
    }

    /* Chat input button styling */
    [data-testid="stChatInput"] button {
        background: #3b82f6 !important;
        color: white !important;
        border: none !important;
        border-radius: 12px !important;
        font-weight: 600 !important;
        transition: background-color 0.2s ease !important;
    }

    [data-testid="stChatInput"] button:hover {
        background: #2563eb !important;
    }

    /* Textarea inside chat input */
    [data-testid="stChatInput"] [data-baseweb="textarea"] {
        border: 2px solid #3b82f6 !important;
        border-radius: 12px !important;
        font-size: 16px !important;
        color: #111 !important;
        width: 100% !important; /* fill the parent container */
        box-sizing: border-box !important;
    }

    /* Ensure proper spacing from sidebar */
    @media (min-width: 768px) {
        [data-testid="stChatInput"] {
            margin-left: 21rem !important; /* Account for sidebar width */
        }
    }

    /* Code container styling */
    .code-container {
        margin: 1rem 0;
        border: 1px solid #d1d5db;
        border-radius: 12px;
        background: white;
        box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
    }

    .code-header {
        display: flex;
        justify-content: space-between;
        align-items: center;
        padding: 0.875rem 1.25rem;
        background: linear-gradient(135deg, #f8fafc 0%, #f1f5f9 100%);
        border-bottom: 1px solid #e2e8f0;
        cursor: pointer;
        transition: all 0.2s ease;
        border-radius: 12px 12px 0 0;
    }

    .code-header:hover {
        background: linear-gradient(135deg, #e2e8f0 0%, #cbd5e1 100%);
    }

    .code-title {
        font-size: 0.9rem;
        font-weight: 600;
        color: #1e293b;
        display: flex;
        align-items: center;
        gap: 0.5rem;
    }

    .code-title:before {
        content: "⚡";
        font-size: 0.8rem;
    }

    .toggle-text {
        font-size: 0.75rem;
        color: #64748b;
        font-weight: 500;
    }

    .code-block {
        background: linear-gradient(135deg, #0f172a 0%, #1e293b 100%);
        color: #e2e8f0;
        padding: 1.5rem;
        font-family: 'SF Mono', 'Monaco', 'Menlo', 'Consolas', monospace;
        font-size: 0.875rem;
        overflow-x: auto;
        line-height: 1.6;
        border-radius: 0 0 12px 12px;
    }

    .answer-container {
        background: #f8fafc;
        border: 1px solid #e2e8f0;
        border-radius: 8px;
        padding: 1.5rem;
        margin: 1rem 0;
    }

    .answer-text {
        font-size: 1.125rem;
        color: #1e293b;
        line-height: 1.6;
        margin-bottom: 1rem;
    }

    .answer-highlight {
        background: #fef3c7;
        padding: 0.125rem 0.375rem;
        border-radius: 4px;
        font-weight: 600;
        color: #92400e;
    }

    .context-info {
        background: #f1f5f9;
        border-left: 4px solid #3b82f6;
        padding: 0.75rem 1rem;
        margin: 1rem 0;
        font-size: 0.875rem;
        color: #475569;
    }

    /* Hide default menu and footer */
    #MainMenu {visibility: hidden;}
    footer {visibility: hidden;}
    header {visibility: hidden;}

    /* Auto scroll */
    .main-container {
        height: 70vh;
        overflow-y: auto;
    }
</style>
""", unsafe_allow_html=True)