Rulga committed on
Commit
c629e49
·
1 Parent(s): 8c4af83

Enhance error handling and add health check endpoint; refactor report generation in LogAnalyzer

Browse files
Files changed (3) hide show
  1. api/analysis.py +17 -21
  2. app.py +111 -29
  3. requirements.txt +5 -0
api/analysis.py CHANGED
@@ -67,24 +67,20 @@ class LogAnalyzer:
67
  stats = self.get_basic_stats()
68
  temporal = self.temporal_analysis()
69
 
70
- report = f"""
71
- Legal Assistant Usage Report
72
- ----------------------------
73
- Period: {self.logs[0]['timestamp']} - {self.logs[-1]['timestamp']}
74
-
75
- Total Interactions: {stats['total_interactions']}
76
- Unique Users: {stats['unique_users']}
77
- Average Response Length: {stats['avg_response_length']:.1f} chars
78
-
79
- Top Questions:
80
- {''.join(f"{q['question']}: {q['count']}\n" for q in stats['most_common_questions'])}
81
-
82
- Knowledge Base Usage:
83
- - With context: {stats['knowledge_base_usage'].get('with_context', 0)}
84
- - Without context: {stats['knowledge_base_usage'].get('without_context', 0)}
85
-
86
- Usage Patterns:
87
- - Daily Activity: {temporal['daily_activity']}
88
- - Hourly Distribution: {temporal['hourly_pattern']}
89
- """
90
- return report
 
67
  stats = self.get_basic_stats()
68
  temporal = self.temporal_analysis()
69
 
70
+ report = (
71
+ "Legal Assistant Usage Report\n"
72
+ "----------------------------\n"
73
+ f"Period: {self.logs[0]['timestamp']} - {self.logs[-1]['timestamp']}\n\n"
74
+ f"Total Interactions: {stats['total_interactions']}\n"
75
+ f"Unique Users: {stats['unique_users']}\n"
76
+ f"Average Response Length: {stats['avg_response_length']:.1f} chars\n\n"
77
+ "Top Questions:\n"
78
+ + "".join(f"- {q['question']}: {q['count']}\n" for q in stats['most_common_questions'])
79
+ + "\nKnowledge Base Usage:\n"
80
+ f"- With context: {stats['knowledge_base_usage'].get('with_context', 0)}\n"
81
+ f"- Without context: {stats['knowledge_base_usage'].get('without_context', 0)}\n\n"
82
+ "Usage Patterns:\n"
83
+ f"- Daily Activity: {temporal['daily_activity']}\n"
84
+ f"- Hourly Distribution: {temporal['hourly_pattern']}\n"
85
+ )
86
+ return report
 
 
 
 
app.py CHANGED
@@ -11,10 +11,14 @@ from langchain_core.output_parsers import StrOutputParser
11
  from datetime import datetime
12
  import json
13
  import traceback
14
- from fastapi import FastAPI, HTTPException
 
15
  from pydantic import BaseModel
16
  from api import router as analysis_router
17
  from utils import ChatAnalyzer, setup_chat_analysis
 
 
 
18
 
19
  # Initialize environment variables
20
  load_dotenv()
@@ -22,6 +26,29 @@ load_dotenv()
22
  app = FastAPI(title="Status Law Assistant API")
23
  app.include_router(analysis_router)
24
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
  # --------------- Model Initialization ---------------
26
  def init_models():
27
  """Initialize AI models"""
@@ -118,37 +145,63 @@ async def chat_endpoint(request: ChatRequest):
118
  allow_dangerous_deserialization=True
119
  )
120
 
121
- context_docs = vector_store.similarity_search(request.message)
122
- context_text = "\n".join([d.page_content for d in context_docs])
 
123
 
124
- prompt_template = PromptTemplate.from_template('''
125
- You are a helpful and polite legal assistant at Status Law.
126
- You answer in the language in which the question was asked.
127
- Answer the question based on the context provided.
128
-
129
- # ... остальной текст промпта ...
 
 
 
 
 
130
 
131
- Context: {context}
132
- Question: {question}
133
-
134
- Response Guidelines:
135
- 1. Answer in the user's language
136
- 2. Cite sources when possible
137
- 3. Offer contact options if unsure
138
- ''')
139
-
140
- chain = prompt_template | llm | StrOutputParser()
141
- response = chain.invoke({
142
- "context": context_text,
143
- "question": request.message
144
- })
145
-
146
- # Log interaction
147
- log_interaction(request.message, response, context_text)
148
-
149
- return ChatResponse(response=response)
150
-
 
 
 
 
 
 
 
 
 
 
 
151
  except Exception as e:
 
 
 
 
 
 
 
 
 
152
  raise HTTPException(status_code=500, detail=str(e))
153
 
154
  # --------------- Logging ---------------
@@ -172,6 +225,35 @@ def log_interaction(user_input: str, bot_response: str, context: str):
172
  print(f"Logging error: {str(e)}")
173
  print(traceback.format_exc())
174
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
175
  if __name__ == "__main__":
176
  import uvicorn
177
  uvicorn.run(app, host="0.0.0.0", port=8000)
 
11
  from datetime import datetime
12
  import json
13
  import traceback
14
+ from fastapi import FastAPI, HTTPException, Request
15
+ from fastapi.responses import JSONResponse
16
  from pydantic import BaseModel
17
  from api import router as analysis_router
18
  from utils import ChatAnalyzer, setup_chat_analysis
19
+ import requests.exceptions
20
+ import aiohttp
21
+ from typing import Union
22
 
23
  # Initialize environment variables
24
  load_dotenv()
 
26
  app = FastAPI(title="Status Law Assistant API")
27
  app.include_router(analysis_router)
28
 
29
# Custom exception handlers: map transient network failures from either the
# sync (requests) or async (aiohttp) HTTP stack onto one uniform 503 reply.

def _network_error_response(exc: Exception) -> JSONResponse:
    """Build the standard 503 JSON payload for a network failure.

    Shared by both handlers below so the error shape stays identical
    regardless of which HTTP client library raised the exception.
    """
    return JSONResponse(
        status_code=503,
        content={
            "error": "Network error occurred",
            "detail": str(exc),
            "type": "network_error"
        }
    )


@app.exception_handler(requests.exceptions.RequestException)
async def network_error_handler(request: Request, exc: requests.exceptions.RequestException):
    """Translate unhandled `requests` network errors into a 503 response."""
    return _network_error_response(exc)


@app.exception_handler(aiohttp.ClientError)
async def aiohttp_error_handler(request: Request, exc: aiohttp.ClientError):
    """Translate unhandled `aiohttp` client errors into a 503 response."""
    return _network_error_response(exc)
51
+
52
  # --------------- Model Initialization ---------------
53
  def init_models():
54
  """Initialize AI models"""
 
145
  allow_dangerous_deserialization=True
146
  )
147
 
148
+ # Add retry logic for network operations
149
+ max_retries = 3
150
+ retry_count = 0
151
 
152
+ while retry_count < max_retries:
153
+ try:
154
+ context_docs = vector_store.similarity_search(request.message)
155
+ context_text = "\n".join([d.page_content for d in context_docs])
156
+
157
+ prompt_template = PromptTemplate.from_template('''
158
+ You are a helpful and polite legal assistant at Status Law.
159
+ You answer in the language in which the question was asked.
160
+ Answer the question based on the context provided.
161
+
162
+ # ... остальной текст промпта ...
163
 
164
+ Context: {context}
165
+ Question: {question}
166
+
167
+ Response Guidelines:
168
+ 1. Answer in the user's language
169
+ 2. Cite sources when possible
170
+ 3. Offer contact options if unsure
171
+ ''')
172
+
173
+ chain = prompt_template | llm | StrOutputParser()
174
+ response = chain.invoke({
175
+ "context": context_text,
176
+ "question": request.message
177
+ })
178
+
179
+ log_interaction(request.message, response, context_text)
180
+ return ChatResponse(response=response)
181
+
182
+ except (requests.exceptions.RequestException, aiohttp.ClientError) as e:
183
+ retry_count += 1
184
+ if retry_count == max_retries:
185
+ raise HTTPException(
186
+ status_code=503,
187
+ detail={
188
+ "error": "Network error after maximum retries",
189
+ "detail": str(e),
190
+ "type": "network_error"
191
+ }
192
+ )
193
+ await asyncio.sleep(1 * retry_count) # Exponential backoff
194
+
195
  except Exception as e:
196
+ if isinstance(e, (requests.exceptions.RequestException, aiohttp.ClientError)):
197
+ raise HTTPException(
198
+ status_code=503,
199
+ detail={
200
+ "error": "Network error occurred",
201
+ "detail": str(e),
202
+ "type": "network_error"
203
+ }
204
+ )
205
  raise HTTPException(status_code=500, detail=str(e))
206
 
207
  # --------------- Logging ---------------
 
225
  print(f"Logging error: {str(e)}")
226
  print(traceback.format_exc())
227
 
228
# Health check endpoint: verifies the models initialize and, when present,
# that the FAISS vector store can actually be loaded from disk.
@app.get("/health")
async def health_check():
    """Report service health.

    Returns a 200 JSON body with vector-store availability on success,
    or a 503 JSON body carrying the error message if model initialization
    or vector-store loading fails.
    """
    try:
        # Smoke-test model initialization; the instances themselves are
        # only needed to attempt loading the vector store below.
        llm, embeddings = init_models()

        # Check existence exactly once and reuse the result, so the
        # reported status always matches the store we actually tested
        # (the original checked the path twice, which could race).
        store_exists = os.path.exists(VECTOR_STORE_PATH)
        if store_exists:
            # Loading is itself the health probe; the object is discarded.
            FAISS.load_local(
                VECTOR_STORE_PATH,
                embeddings,
                allow_dangerous_deserialization=True
            )

        return {
            "status": "healthy",
            "vector_store": "available" if store_exists else "not_found"
        }

    except Exception as e:
        # Any failure above marks the whole service unhealthy.
        return JSONResponse(
            status_code=503,
            content={
                "status": "unhealthy",
                "error": str(e)
            }
        )
256
+
257
  if __name__ == "__main__":
258
  import uvicorn
259
  uvicorn.run(app, host="0.0.0.0", port=8000)
requirements.txt CHANGED
@@ -18,3 +18,8 @@ plotly
18
  pytest
19
  httpx
20
  pytest-asyncio
 
 
 
 
 
 
18
  pytest
19
  httpx
20
  pytest-asyncio
21
+ aiohttp
22
+ requests
23
+ tenacity
24
+
25
+