Nipun Claude committed on
Commit 7875cb9 · 1 Parent(s): 8673e7c

Fix indexing error and enhance processing indicator


- Add a critical rule for calendar.month_name indexing: convert pandas values to int()
- Fix the spinner not showing by moving the actual processing inside the spinner context
- Replace the simple spinner with an animated Claude Code-style processing indicator
- Add a bouncing-dots animation and descriptive text for better UX
- Fix indentation issues in the processing flow

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <[email protected]>

Files changed (2)
  1. app.py +58 -43
  2. src.py +1 -0
app.py CHANGED
```diff
@@ -803,11 +803,6 @@ for response_id, response in enumerate(st.session_state.responses):
                 st.success("Thanks for your feedback!")
                 st.rerun()

-# Show processing indicator if processing (only once at the bottom)
-if st.session_state.get("processing"):
-    with st.spinner(f"Processing with {st.session_state.get('current_model', 'Unknown')}..."):
-        pass
-
 # Chat input with better guidance
 prompt = st.chat_input("💬 Ask about air quality trends, compare cities, or request visualizations...", key="main_chat")

@@ -844,54 +839,74 @@ if prompt and not st.session_state.get("processing"):

 # Process the question if we're in processing state
 if st.session_state.get("processing"):
-    prompt = st.session_state.get("current_question")
-    model_name = st.session_state.get("current_model")
-
-    try:
-        response = ask_question(model_name=model_name, question=prompt)
+    # Enhanced processing indicator like Claude Code
+    st.markdown("""
+    <div style='padding: 1rem; text-align: center; background: #f8fafc; border-radius: 8px; margin: 1rem 0;'>
+        <div style='display: flex; align-items: center; justify-content: center; gap: 0.5rem; color: #475569;'>
+            <div style='font-weight: 500;'>🤖 Processing with """ + str(st.session_state.get('current_model', 'Unknown')) + """</div>
+            <div class='dots' style='display: inline-flex; gap: 2px;'>
+                <div class='dot' style='width: 4px; height: 4px; background: #3b82f6; border-radius: 50%; animation: bounce 1.4s infinite ease-in-out;'></div>
+                <div class='dot' style='width: 4px; height: 4px; background: #3b82f6; border-radius: 50%; animation: bounce 1.4s infinite ease-in-out; animation-delay: 0.16s;'></div>
+                <div class='dot' style='width: 4px; height: 4px; background: #3b82f6; border-radius: 50%; animation: bounce 1.4s infinite ease-in-out; animation-delay: 0.32s;'></div>
+            </div>
+        </div>
+        <div style='font-size: 0.75rem; color: #6b7280; margin-top: 0.25rem;'>Analyzing data and generating response...</div>
+    </div>
+    <style>
+    @keyframes bounce {
+        0%, 80%, 100% { transform: scale(0.8); opacity: 0.5; }
+        40% { transform: scale(1.2); opacity: 1; }
+    }
+    </style>
+    """, unsafe_allow_html=True)
+    prompt = st.session_state.get("current_question")
+    model_name = st.session_state.get("current_model")

-        if not isinstance(response, dict):
+    try:
+        response = ask_question(model_name=model_name, question=prompt)
+
+        if not isinstance(response, dict):
+            response = {
+                "role": "assistant",
+                "content": "Error: Invalid response format",
+                "gen_code": "",
+                "ex_code": "",
+                "last_prompt": prompt,
+                "error": "Invalid response format",
+                "timestamp": datetime.now().strftime("%H:%M")
+            }
+
+        response.setdefault("role", "assistant")
+        response.setdefault("content", "No content generated")
+        response.setdefault("gen_code", "")
+        response.setdefault("ex_code", "")
+        response.setdefault("last_prompt", prompt)
+        response.setdefault("error", None)
+        response.setdefault("timestamp", datetime.now().strftime("%H:%M"))
+
+    except Exception as e:
         response = {
             "role": "assistant",
-            "content": "Error: Invalid response format",
+            "content": f"Sorry, I encountered an error: {str(e)}",
             "gen_code": "",
             "ex_code": "",
             "last_prompt": prompt,
-            "error": "Invalid response format",
+            "error": str(e),
             "timestamp": datetime.now().strftime("%H:%M")
         }
+
+    st.session_state.responses.append(response)
+    st.session_state["last_prompt"] = prompt
+    st.session_state["last_model_name"] = model_name
+    st.session_state.processing = False

-        response.setdefault("role", "assistant")
-        response.setdefault("content", "No content generated")
-        response.setdefault("gen_code", "")
-        response.setdefault("ex_code", "")
-        response.setdefault("last_prompt", prompt)
-        response.setdefault("error", None)
-        response.setdefault("timestamp", datetime.now().strftime("%H:%M"))
+    # Clear processing state
+    if "current_model" in st.session_state:
+        del st.session_state.current_model
+    if "current_question" in st.session_state:
+        del st.session_state.current_question

-    except Exception as e:
-        response = {
-            "role": "assistant",
-            "content": f"Sorry, I encountered an error: {str(e)}",
-            "gen_code": "",
-            "ex_code": "",
-            "last_prompt": prompt,
-            "error": str(e),
-            "timestamp": datetime.now().strftime("%H:%M")
-        }
-
-    st.session_state.responses.append(response)
-    st.session_state["last_prompt"] = prompt
-    st.session_state["last_model_name"] = model_name
-    st.session_state.processing = False
-
-    # Clear processing state
-    if "current_model" in st.session_state:
-        del st.session_state.current_model
-    if "current_question" in st.session_state:
-        del st.session_state.current_question
-
-    st.rerun()
+    st.rerun()

 # Close chat container
 st.markdown("</div>", unsafe_allow_html=True)
```
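The app.py change implements a two-phase chat flow: the chat input only queues the question and sets a `processing` flag, and the next rerun draws an indicator before the model call runs, then clears the flag and reruns again. Below is a condensed, self-contained sketch of that pattern; it is illustrative only (the `ask_question` stub and the `"example-model"` name are placeholders, not the app's real implementation, and a plain `st.spinner` stands in for the animated HTML indicator):

```python
# Two-phase Streamlit chat flow (sketch, not the app's exact code).
import streamlit as st
from datetime import datetime


def ask_question(model_name: str, question: str) -> dict:
    """Stand-in for the app's real ask_question(); assumed to return a response dict."""
    return {"role": "assistant", "content": f"[{model_name}] answer to: {question}"}


if "responses" not in st.session_state:
    st.session_state.responses = []

prompt = st.chat_input("Ask a question...")

# Phase 1: record the question, set the flag, and rerun immediately.
if prompt and not st.session_state.get("processing"):
    st.session_state.processing = True
    st.session_state.current_question = prompt
    st.session_state.current_model = "example-model"  # placeholder model name
    st.rerun()

# Phase 2: on the rerun, do the work while an indicator is visible. The old
# `with st.spinner(...): pass` closed the spinner before any work ran, so it
# never showed; processing must happen while the indicator is on screen.
if st.session_state.get("processing"):
    with st.spinner(f"Processing with {st.session_state.get('current_model', 'Unknown')}..."):
        try:
            response = ask_question(
                model_name=st.session_state.get("current_model"),
                question=st.session_state.get("current_question"),
            )
            if not isinstance(response, dict):
                response = {"role": "assistant", "content": "Error: Invalid response format"}
        except Exception as e:
            response = {"role": "assistant", "content": f"Sorry, I encountered an error: {e}"}
    response.setdefault("timestamp", datetime.now().strftime("%H:%M"))

    st.session_state.responses.append(response)
    st.session_state.processing = False
    if "current_model" in st.session_state:
        del st.session_state.current_model
    if "current_question" in st.session_state:
        del st.session_state.current_question
    st.rerun()

# Render the conversation so far.
for msg in st.session_state.responses:
    with st.chat_message(msg["role"]):
        st.write(msg["content"])
```

Because Streamlit streams elements to the browser as the script executes, any indicator rendered before the blocking model call stays visible until the final `st.rerun()`, which is why the commit moves the actual processing after (or inside) the indicator rather than drawing a spinner around `pass`.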
src.py CHANGED
```diff
@@ -326,6 +326,7 @@ VARIABLE & TYPE HANDLING:
 - Convert pandas/numpy objects to proper Python types before operations
 - Convert datetime/period objects appropriately: .astype(str), .dt.strftime(), int()
 - Always cast to appropriate types for indexing: int(), str(), list()
+- CRITICAL: Convert pandas/numpy values to int before list indexing: int(value) for calendar.month_name[int(month_value)]
 - Use explicit type conversions rather than relying on implicit casting

 PANDAS OPERATIONS:
```
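The new src.py rule targets a common failure mode: scalars pulled out of pandas objects are often numpy floats (for example after merges, means, or NaN handling), and a `numpy.float64` month value raises a `TypeError` when used to index `calendar.month_name`, while `int(value)` works. A minimal illustration with made-up data (not the app's dataset):

```python
import calendar
import pandas as pd

df = pd.DataFrame({
    "month": [1.0, 2.0, 2.0, 3.0],   # float dtype, as often happens after merges or NaNs
    "pm25":  [60.0, 82.5, 90.0, 71.0],
})

# Month of the worst PM2.5 reading; .loc returns a numpy.float64 scalar here.
month_value = df.loc[df["pm25"].idxmax(), "month"]

# calendar.month_name[month_value]          # TypeError: list indices must be integers or slices
print(calendar.month_name[int(month_value)])  # 'February' -- explicit int() makes indexing safe
```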