Rivalcoder committed on
Commit
836bc0e
·
1 Parent(s): 6dd4fed

New Version Updated

pdf_parser.py → Old_Files/pdf_parser.py RENAMED
File without changes
app.py CHANGED
@@ -4,7 +4,6 @@ import logging
  import time
  import json
  import hashlib
- from datetime import datetime
  from concurrent.futures import ThreadPoolExecutor
  from threading import Lock
  import re
@@ -13,7 +12,6 @@ import re
  cache_dir = os.path.join(os.getcwd(), ".cache")
  os.makedirs(cache_dir, exist_ok=True)
  os.environ['HF_HOME'] = cache_dir
- os.environ['TRANSFORMERS_CACHE'] = cache_dir
 
  # Suppress TensorFlow warnings
  os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
@@ -24,16 +22,41 @@ os.environ['TF_ENABLE_DEPRECATION_WARNINGS'] = '0'
  warnings.filterwarnings('ignore', category=DeprecationWarning, module='tensorflow')
  logging.getLogger('tensorflow').setLevel(logging.ERROR)
 
- from fastapi import FastAPI, HTTPException, Depends, Header, Query
+ from fastapi import FastAPI, HTTPException, Depends, Header, Query, Request
  from fastapi.middleware.cors import CORSMiddleware
  from pydantic import BaseModel
- from pdf_parser import parse_pdf_from_url_multithreaded as parse_pdf_from_url, parse_pdf_from_file_multithreaded as parse_pdf_from_file
+ from content_readers import parse_document_url, parse_document_file
  from embedder import build_faiss_index, preload_model
  from retriever import retrieve_chunks
  from llm import query_gemini
  import uvicorn
+ from contextlib import asynccontextmanager
 
- app = FastAPI(title="HackRx Insurance Policy Assistant", version="1.0.0")
+ # Import Supabase logger
+ from db_logger import log_query
+
+
+ # Helper to get real client IP
+ def get_client_ip(request: Request):
+     forwarded_for = request.headers.get("x-forwarded-for")
+     if forwarded_for:
+         return forwarded_for.split(",")[0].strip()
+     real_ip = request.headers.get("x-real-ip")
+     if real_ip:
+         return real_ip
+     return request.client.host
+
+
+ @asynccontextmanager
+ async def lifespan(app: FastAPI):
+     print("Starting up HackRx Insurance Policy Assistant...")
+     print("Preloading sentence transformer model...")
+     preload_model()
+     print("Model preloading completed. API is ready to serve requests!")
+     yield
+
+
+ app = FastAPI(title="HackRx Insurance Policy Assistant", version="3.2.6", lifespan=lifespan)
 
  app.add_middleware(
      CORSMiddleware,
@@ -43,12 +66,6 @@ app.add_middleware(
      allow_headers=["*"],
  )
 
- @app.on_event("startup")
- async def startup_event():
-     print("Starting up HackRx Insurance Policy Assistant...")
-     print("Preloading sentence transformer model...")
-     preload_model()
-     print("Model preloading completed. API is ready to serve requests!")
 
  @app.get("/")
  async def root():
@@ -58,6 +75,7 @@ async def root():
  async def health_check():
      return {"status": "healthy"}
 
+
  class QueryRequest(BaseModel):
      documents: str
      questions: list[str]
@@ -66,6 +84,7 @@ class LocalQueryRequest(BaseModel):
      document_path: str
      questions: list[str]
 
+
  def verify_token(authorization: str = Header(None)):
      if not authorization or not authorization.startswith("Bearer "):
          raise HTTPException(status_code=401, detail="Invalid authorization header")
@@ -83,25 +102,17 @@ def get_document_id_from_url(url: str) -> str:
  def question_has_https_link(q: str) -> bool:
      return bool(re.search(r"https://[^\s]+", q))
 
+
  # Document cache with thread safety
  doc_cache = {}
  doc_cache_lock = Lock()
 
- # ----------------- CACHE CLEAR ENDPOINT -----------------
+
  @app.delete("/api/v1/cache/clear")
- async def clear_cache(doc_id: str = Query(None, description="Optional document ID to clear"),
-                       url: str = Query(None, description="Optional document URL to clear"),
-                       doc_only: bool = Query(False, description="If true, only clear document cache")):
-     """
-     Clear cache data.
-     - No params: Clears ALL caches.
-     - doc_id: Clears caches for that document only.
-     - url: Same as doc_id but computed automatically from URL.
-     - doc_only: Clears only document cache.
-     """
+ async def clear_cache(doc_id: str = Query(None),
+                       url: str = Query(None),
+                       doc_only: bool = Query(False)):
      cleared = {}
-
-     # If URL is provided, convert to doc_id
      if url:
          doc_id = get_document_id_from_url(url)
 
@@ -119,19 +130,20 @@ async def clear_cache(doc_id: str = Query(None, description="Optional document I
 
      return {"status": "success", "cleared": cleared}
 
+
  @app.post("/api/v1/hackrx/run")
- async def run_query(request: QueryRequest, token: str = Depends(verify_token)):
+ async def run_query(request: QueryRequest, fastapi_request: Request, token: str = Depends(verify_token)):
      start_time = time.time()
      timing_data = {}
      try:
+         user_ip = get_client_ip(fastapi_request)
+         user_agent = fastapi_request.headers.get("user-agent", "Unknown")
+
          print("=== INPUT JSON ===")
          print(json.dumps({"documents": request.documents, "questions": request.questions}, indent=2))
          print("==================\n")
 
-         print(f"Processing {len(request.questions)} questions...")
-
-         # PDF Parsing and FAISS Caching (keep document caching for speed)
-         doc_id = get_document_id_from_url(request.documents)
+         doc_id = get_document_id_from_url(request.documents or "")
          with doc_cache_lock:
              if doc_id in doc_cache:
                  print("✅ Using cached document...")
@@ -142,7 +154,7 @@ async def run_query(request: QueryRequest, token: str = Depends(verify_token)):
          else:
              print("⚙️ Parsing and indexing new document...")
              pdf_start = time.time()
-             text_chunks = parse_pdf_from_url(request.documents)
+             text_chunks = parse_document_url(request.documents)
              timing_data['pdf_parsing'] = round(time.time() - pdf_start, 2)
 
              index_start = time.time()
@@ -155,18 +167,13 @@ async def run_query(request: QueryRequest, token: str = Depends(verify_token)):
              "texts": texts
          }
 
-         # Retrieve chunks for all questions — no QA caching
          retrieval_start = time.time()
          all_chunks = set()
-         question_positions = {}
          for idx, question in enumerate(request.questions):
              top_chunks = retrieve_chunks(index, texts, question)
              all_chunks.update(top_chunks)
-             question_positions.setdefault(question, []).append(idx)
          timing_data['chunk_retrieval'] = round(time.time() - retrieval_start, 2)
-         print(f"Retrieved {len(all_chunks)} unique chunks for all questions")
 
-         # Query Gemini LLM fresh for all questions
          context_chunks = list(all_chunks)
          batch_size = 10
          batches = [(i, request.questions[i:i + batch_size]) for i in range(0, len(request.questions), batch_size)]
@@ -190,38 +197,41 @@ async def run_query(request: QueryRequest, token: str = Depends(verify_token)):
          timing_data['llm_processing'] = round(time.time() - llm_start, 2)
 
          responses = [results_dict.get(i, "Not Found") for i in range(len(request.questions))]
-         timing_data['total_time'] = round(time.time() - start_time, 2)
-
-         print(f"\n=== TIMING BREAKDOWN ===")
-         for k, v in timing_data.items():
-             print(f"{k}: {v}s")
-         print(f"=======================\n")
-
-         print(f"=== OUTPUT JSON ===")
-         print(json.dumps({"answers": responses}, indent=2))
-         print(f"==================\n")
+         total_time = time.time() - start_time
+         timing_data['total_time'] = round(total_time, 2)
+
+         # Log to Supabase with user_agent + geo_location
+         for q, a in zip(request.questions, responses):
+             log_query(
+                 document_source=request.documents or "UNKNOWN",
+                 question=q,
+                 answer=a,
+                 ip_address=user_ip,
+                 user_agent=user_agent,
+                 response_time=total_time
+             )
 
          return {"answers": responses}
 
      except Exception as e:
-         print(f"Error: {str(e)}")
          raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
 
+
  @app.post("/api/v1/hackrx/local")
- async def run_local_query(request: LocalQueryRequest):
+ async def run_local_query(request: LocalQueryRequest, fastapi_request: Request):
      start_time = time.time()
      timing_data = {}
      try:
+         user_ip = get_client_ip(fastapi_request)
+         user_agent = fastapi_request.headers.get("user-agent", "Unknown")
+
          print("=== INPUT JSON ===")
          print(json.dumps({"document_path": request.document_path, "questions": request.questions}, indent=2))
          print("==================\n")
 
-         print(f"Processing {len(request.questions)} questions locally...")
-
          pdf_start = time.time()
-         text_chunks = parse_pdf_from_file(request.document_path)
+         text_chunks = parse_document_file(request.document_path)
          timing_data['pdf_parsing'] = round(time.time() - pdf_start, 2)
-         print(f"Extracted {len(text_chunks)} text chunks from PDF")
 
          index_start = time.time()
          index, texts = build_faiss_index(text_chunks)
@@ -233,12 +243,10 @@ async def run_local_query(request: LocalQueryRequest):
              top_chunks = retrieve_chunks(index, texts, question)
              all_chunks.update(top_chunks)
          timing_data['chunk_retrieval'] = round(time.time() - retrieval_start, 2)
-         print(f"Retrieved {len(all_chunks)} unique chunks")
 
-         questions = request.questions
          context_chunks = list(all_chunks)
          batch_size = 20
-         batches = [(i, questions[i:i + batch_size]) for i in range(0, len(questions), batch_size)]
+         batches = [(i, request.questions[i:i + batch_size]) for i in range(0, len(request.questions), batch_size)]
 
          llm_start = time.time()
          results_dict = {}
@@ -258,24 +266,27 @@ async def run_local_query(request: LocalQueryRequest):
                  results_dict[start_idx + j] = f"Error: {str(e)}"
          timing_data['llm_processing'] = round(time.time() - llm_start, 2)
 
-         responses = [results_dict.get(i, "Not Found") for i in range(len(questions))]
-         timing_data['total_time'] = round(time.time() - start_time, 2)
-
-         print(f"\n=== TIMING BREAKDOWN ===")
-         for k, v in timing_data.items():
-             print(f"{k}: {v}s")
-         print(f"=======================\n")
-
-         print(f"=== OUTPUT JSON ===")
-         print(json.dumps({"answers": responses}, indent=2))
-         print(f"==================\n")
+         responses = [results_dict.get(i, "Not Found") for i in range(len(request.questions))]
+         total_time = time.time() - start_time
+         timing_data['total_time'] = round(total_time, 2)
+
+         # Log to Supabase with user_agent + geo_location
+         for q, a in zip(request.questions, responses):
+             log_query(
+                 document_source=request.document_path or "UNKNOWN",
+                 question=q,
+                 answer=a,
+                 ip_address=user_ip,
+                 user_agent=user_agent,
+                 response_time=total_time
+             )
 
          return {"answers": responses}
 
      except Exception as e:
-         print(f"Error: {str(e)}")
          raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
 
+
  if __name__ == "__main__":
      port = int(os.environ.get("PORT", 7860))
      uvicorn.run("app:app", host="0.0.0.0", port=port)
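A minimal sketch of calling the updated /api/v1/hackrx/run endpoint. The host, port, document URL, and token value are assumptions; the diff only shows that the Authorization header must start with "Bearer ".

    import requests

    payload = {
        "documents": "https://example.com/policy.pdf",    # hypothetical document URL
        "questions": ["What is the grace period for premium payment?"],
    }
    resp = requests.post(
        "http://localhost:7860/api/v1/hackrx/run",         # assumes the default PORT of 7860
        json=payload,
        headers={"Authorization": "Bearer <your-token>"},  # token validation beyond the prefix is not shown here
        timeout=300,
    )
    print(resp.json()["answers"])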
content_readers/__init__.py ADDED
@@ -0,0 +1,52 @@
+ from io import BytesIO
+ import requests
+ import os
+ from .pdf_extractor import parse_pdf_from_url_multithreaded, parse_pdf_from_file_multithreaded
+ from .image_extractor import is_image, extract_text_from_image_bytes
+ from .web_extractor import extract_text_from_html
+ from .zip_extractor import extract_from_zip_bytes
+
+ def parse_document_url(url):
+     try:
+         res = requests.get(url)
+         content = res.content
+         content_type = res.headers.get("content-type", "").lower()
+     except Exception as e:
+         return [f"Download error: {str(e)}"]
+
+     if "text/html" in content_type or url.endswith(".html"):
+         return extract_text_from_html(content)
+
+     if "zip" in content_type or url.endswith(".zip"):
+         zip_results = extract_from_zip_bytes(content)
+         return [f"{name}: {text}" for name, texts in zip_results.items() for text in texts]
+
+     if "image" in content_type or is_image(content):
+         text = extract_text_from_image_bytes(content)
+         return [text] if text else ["No data found (image empty)"]
+
+     if "pdf" in content_type or url.endswith(".pdf"):
+         return parse_pdf_from_url_multithreaded(BytesIO(content))
+
+     return ["Unsupported file type"]
+
+ def parse_document_file(file_path):
+     if file_path.lower().endswith(".zip"):
+         with open(file_path, "rb") as f:
+             zip_results = extract_from_zip_bytes(f.read())
+         return [f"{name}: {text}" for name, texts in zip_results.items() for text in texts]
+
+     if file_path.lower().endswith((".png", ".jpg", ".jpeg", ".bmp", ".gif", ".tiff", ".webp")):
+         with open(file_path, "rb") as f:
+             text = extract_text_from_image_bytes(f.read())
+         return [text] if text else ["No data found (image empty)"]
+
+     if file_path.lower().endswith(".pdf"):
+         return parse_pdf_from_file_multithreaded(file_path)
+
+     if file_path.lower().endswith(".html"):
+         with open(file_path, "r", encoding="utf-8") as f:
+             content = f.read()
+         return extract_text_from_html(content)
+
+     return ["Unsupported file type"]
content_readers/image_extractor.py ADDED
@@ -0,0 +1,11 @@
+ import imghdr
+ from PIL import Image
+ import pytesseract
+ from io import BytesIO
+
+ def is_image(content):
+     return imghdr.what(None, h=content) in ["jpeg", "png", "bmp", "gif", "tiff", "webp"]
+
+ def extract_text_from_image_bytes(image_bytes):
+     image = Image.open(BytesIO(image_bytes))
+     return pytesseract.image_to_string(image).strip()
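A quick check of the OCR helper, assuming a local image file and an installed Tesseract binary (pytesseract is only a wrapper); note that the stdlib imghdr module used by is_image is removed in Python 3.13.

    from content_readers.image_extractor import is_image, extract_text_from_image_bytes

    with open("scan.png", "rb") as f:    # hypothetical image file
        data = f.read()
    if is_image(data):
        print(extract_text_from_image_bytes(data) or "No text recognized")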
content_readers/pdf_extractor.py ADDED
@@ -0,0 +1,42 @@
+ import fitz  # PyMuPDF
+ from concurrent.futures import ThreadPoolExecutor
+
+ def _extract_text(page):
+     text = page.get_text()
+     return text.strip() if text and text.strip() else None
+
+ def parse_pdf_from_url_multithreaded(content, max_workers=2, chunk_size=1):
+     try:
+         with fitz.open(stream=content, filetype="pdf") as doc:
+             pages = list(doc)
+             with ThreadPoolExecutor(max_workers=max_workers) as executor:
+                 texts = list(executor.map(_extract_text, pages))
+         if chunk_size > 1:
+             chunks = []
+             for i in range(0, len(texts), chunk_size):
+                 chunk = ' '.join([t for t in texts[i:i+chunk_size] if t])
+                 if chunk:
+                     chunks.append(chunk)
+             return chunks if chunks else ["No data found in this document (empty PDF)"]
+         return [t for t in texts if t] or ["No data found in this document (empty PDF)"]
+     except Exception as e:
+         print(f"❌ Failed to parse as PDF: {str(e)}")
+         return ["No data found in this document (not PDF or corrupted)"]
+
+ def parse_pdf_from_file_multithreaded(file_path, max_workers=2, chunk_size=1):
+     try:
+         with fitz.open(file_path) as doc:
+             pages = list(doc)
+             with ThreadPoolExecutor(max_workers=max_workers) as executor:
+                 texts = list(executor.map(_extract_text, pages))
+         if chunk_size > 1:
+             chunks = []
+             for i in range(0, len(texts), chunk_size):
+                 chunk = ' '.join([t for t in texts[i:i+chunk_size] if t])
+                 if chunk:
+                     chunks.append(chunk)
+             return chunks if chunks else ["No data found in this document (local PDF empty)"]
+         return [t for t in texts if t] or ["No data found in this document (local PDF empty)"]
+     except Exception as e:
+         print(f"❌ Failed to open local file: {str(e)}")
+         return ["No data found in this document (local file error)"]
content_readers/web_extractor.py ADDED
@@ -0,0 +1,11 @@
+ from bs4 import BeautifulSoup
+
+ def extract_text_from_html(content):
+     try:
+         soup = BeautifulSoup(content, "html.parser")
+         text = soup.get_text(separator="\n")
+         lines = [t.strip() for t in text.splitlines() if t.strip()]
+         return lines if lines else ["No data found in this document (empty HTML)"]
+     except Exception as e:
+         print(f"❌ HTML parse failed: {str(e)}")
+         return ["No data found in this document (HTML error)"]
content_readers/zip_extractor.py ADDED
@@ -0,0 +1,39 @@
+ import zipfile
+ from io import BytesIO
+ from .pdf_extractor import parse_pdf_from_url_multithreaded
+ from .image_extractor import is_image, extract_text_from_image_bytes
+
+ def extract_from_zip_bytes(zip_bytes):
+     """
+     Extract and process files inside a ZIP archive.
+     Returns a dictionary: {filename: extracted_text_list}
+     """
+     results = {}
+     try:
+         with zipfile.ZipFile(BytesIO(zip_bytes)) as z:
+             for file_name in z.namelist():
+                 try:
+                     file_data = z.read(file_name)
+                 except Exception as e:
+                     results[file_name] = [f"❌ Failed to read file: {e}"]
+                     continue
+
+                 # PDF files
+                 if file_name.lower().endswith(".pdf"):
+                     results[file_name] = parse_pdf_from_url_multithreaded(BytesIO(file_data))
+
+                 # Image files
+                 elif is_image(file_data):
+                     text = extract_text_from_image_bytes(file_data)
+                     results[file_name] = [text] if text else ["No data found (image empty)"]
+
+                 # Unsupported files
+                 else:
+                     results[file_name] = ["⚠ Unsupported file type inside ZIP"]
+
+         return results if results else {"ZIP": ["No supported files found in archive"]}
+
+     except zipfile.BadZipFile:
+         return {"ZIP": ["Invalid or corrupted ZIP file"]}
+     except Exception as e:
+         return {"ZIP": [f"Error processing ZIP: {e}"]}
db_logger.py ADDED
@@ -0,0 +1,47 @@
+ import os
+ from datetime import datetime
+ from supabase import create_client, Client
+ import requests
+
+ SUPABASE_URL = os.getenv("SUPABASE_URL")
+ SUPABASE_KEY = os.getenv("SUPABASE_KEY")
+
+ supabase: Client = create_client(SUPABASE_URL, SUPABASE_KEY)
+
+ def get_geo_location(ip: str) -> str:
+     """
+     Fetch approximate geo-location for the given IP address.
+     Returns 'Unknown' if lookup fails.
+     """
+     try:
+         if ip.startswith("127.") or ip == "localhost":
+             return "Localhost"
+         resp = requests.get(f"https://ipapi.co/{ip}/country_name/", timeout=3)
+         if resp.status_code == 200:
+             return resp.text.strip() or "Unknown"
+     except Exception:
+         pass
+     return "Unknown"
+
+ def log_query(document_source: str, question: str, answer: str,
+               ip_address: str, response_time: float,
+               user_agent: str = None):
+     """
+     Store a question-answer log in Supabase with geo-location and user-agent.
+     """
+     now_str = datetime.utcnow().isoformat()
+     geo_location = get_geo_location(ip_address)
+
+     try:
+         supabase.table("qa_logs").insert({
+             "document_source": document_source,
+             "question": question,
+             "answer": answer,
+             "ip_address": ip_address,
+             "geo_location": geo_location,
+             "user_agent": user_agent or "Unknown",
+             "response_time_sec": round(response_time, 2),
+             "created_at": now_str
+         }).execute()
+     except Exception as e:
+         print(f"Failed to log query to Supabase: {e}")
embedder.py CHANGED
@@ -24,7 +24,7 @@ def preload_model(model_name="paraphrase-MiniLM-L3-v2"):
          print(f"Trying fallback: {fallback_name}")
          _model = SentenceTransformer(fallback_name, cache_folder=cache_dir)
 
-     print("✅ Model ready.")
+     print(" 👍 Model ready.")
      return _model
 
  def get_model():
requirements.txt CHANGED
@@ -10,3 +10,4 @@ google-generativeai
  pytesseract
  Pillow
  beautifulsoup4
+ supabase
utils.py ADDED
@@ -0,0 +1,10 @@
+ from fastapi import Request
+
+ def get_client_ip(request: Request):
+     forwarded_for = request.headers.get("x-forwarded-for")
+     if forwarded_for:
+         return forwarded_for.split(",")[0].strip()
+     real_ip = request.headers.get("x-real-ip")
+     if real_ip:
+         return real_ip
+     return request.client.host
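A small sketch of wiring utils.get_client_ip into a route (the app and route name are illustrative): behind a proxy the X-Forwarded-For header wins, otherwise the socket peer address is used.

    from fastapi import FastAPI, Request
    from utils import get_client_ip

    app = FastAPI()

    @app.get("/whoami")
    async def whoami(request: Request):
        return {"ip": get_client_ip(request)}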