Muzammil6376 committed
Commit 6d145b6 · verified · 1 Parent(s): ae644bf

Update app.py

Files changed (1):
  1. app.py +40 -121
app.py CHANGED

@@ -31,18 +31,18 @@ combined_texts: List[str] = []  # Combined text + image captions corpus
 FIGURES_DIR = "figures"
 if os.path.exists(FIGURES_DIR):
     shutil.rmtree(FIGURES_DIR)
-os.makedirs(FIGURES_DIR, exist_ok=True)
+os.makedirs(FIGURES_DIR, exist_ok=True)
 
 # ── Clients & Models ───────────────────────────────────────────────────────────
-hf = InferenceClient()  # will use HUGGINGFACEHUB_API_TOKEN from env
+hf = InferenceClient()  # uses HUGGINGFACEHUB_API_TOKEN env var
 
-# BLIP captioner (small local model download)
+# BLIP captioner
 blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
-blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
+blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
 
 
 def generate_caption(image_path: str) -> str:
-    """Ask BLIP to caption a local image."""
+    """Generate caption for image via BLIP."""
     image = Image.open(image_path).convert("RGB")
     inputs = blip_processor(image, return_tensors="pt")
     out = blip_model.generate(**inputs)
@@ -50,23 +50,17 @@ def generate_caption(image_path: str) -> str:
 
 
 def embed_texts(texts: List[str]) -> List[List[float]]:
-    """
-    Call the HF embeddings endpoint using google/Gemma-Embeddings-v1.0.
-    """
-    resp = hf.embeddings(
-        model="google/Gemma-Embeddings-v1.0",
-        inputs=texts,
-    )
+    """Call HF inference embeddings endpoint."""
+    resp = hf.embeddings(model="google/Gemma-Embeddings-v1.0", inputs=texts)
     return resp["embeddings"]
 
 
 def process_pdf(pdf_file) -> str:
     """
-    Parse the PDF, caption images, combine text+captions, embed remotely,
-    build FAISS index, and prepare retriever. Falls back to text-only if poppler is missing.
+    Parse PDF, extract text and images, caption images,
+    embed all chunks remotely, build FAISS index.
     """
-    from pdf2image.exceptions import PDFInfoNotInstalledError
-    global current_pdf_name, retriever, combined_texts
+    global retriever, current_pdf_name, combined_texts
 
     if pdf_file is None:
         return "❌ Please upload a PDF file."
@@ -74,106 +68,52 @@ def process_pdf(pdf_file) -> str:
     pdf_path = pdf_file.name
     current_pdf_name = os.path.basename(pdf_path)
 
-    # Try rich parsing; fallback if poppler/pdfinfo is unavailable
+    # Attempt rich parsing
     try:
+        from pdf2image.exceptions import PDFInfoNotInstalledError
         elements = partition_pdf(
             filename=pdf_path,
             strategy=PartitionStrategy.HI_RES,
-            extract_image_block_types=["Image", "Table"],
+            extract_image_block_types=["Image","Table"],
             extract_image_block_output_dir=FIGURES_DIR,
         )
         text_elements = [el.text for el in elements if el.category not in ["Image","Table"] and el.text]
         image_files = [os.path.join(FIGURES_DIR, f) for f in os.listdir(FIGURES_DIR)
                        if f.lower().endswith((".png",".jpg",".jpeg"))]
-    except PDFInfoNotInstalledError:
-        # Fallback: text-only extraction
-        from PyPDF2 import PdfReader
+    except Exception:
+        # Fallback to text-only
+        from pypdf import PdfReader
        reader = PdfReader(pdf_path)
        text_elements = [page.extract_text() or "" for page in reader.pages]
        image_files = []
 
-    # Caption images if any
     captions = [generate_caption(img) for img in image_files]
-
     combined_texts = text_elements + captions
-    vectors = embed_texts(combined_texts)
-    index = FAISS.from_embeddings(texts=combined_texts, embeddings=vectors)
-    retriever = index.as_retriever(search_kwargs={"k": 2})
-
-    return f"✅ Indexed '{current_pdf_name}' — {len(text_elements)} text blocks + {len(captions)} image captions"
-    """
-    Parse the PDF, caption images, combine text+captions, embed remotely,
-    build FAISS index, and prepare retriever.
-    """
-    global current_pdf_name, retriever, combined_texts
-
-    if pdf_file is None:
-        return "❌ Please upload a PDF file."
-
-    # Save and name
-    pdf_path = pdf_file.name
-    current_pdf_name = os.path.basename(pdf_path)
 
-    # Extract blocks
-    elements = partition_pdf(
-        filename=pdf_path,
-        strategy=PartitionStrategy.HI_RES,
-        extract_image_block_types=["Image", "Table"],
-        extract_image_block_output_dir=FIGURES_DIR,
-    )
-
-    # Split text vs. images
-    text_elements = [
-        el.text for el in elements
-        if el.category not in ["Image", "Table"] and el.text
-    ]
-    image_files = [
-        os.path.join(FIGURES_DIR, f)
-        for f in os.listdir(FIGURES_DIR)
-        if f.lower().endswith((".png", ".jpg", ".jpeg"))
-    ]
-
-    # Caption images
-    captions = [generate_caption(img) for img in image_files]
-
-    # Combine
-    combined_texts = text_elements + captions
-
-    # Remote embeddings
     vectors = embed_texts(combined_texts)
-
-    # Build FAISS
-    index = FAISS.from_embeddings(
-        texts=combined_texts,
-        embeddings=vectors,
-    )
-    retriever = index.as_retriever(search_kwargs={"k": 2})
+    index = FAISS.from_embeddings(texts=combined_texts, embeddings=vectors)
+    retriever = index.as_retriever(search_kwargs={"k":2})
 
     return f"✅ Indexed '{current_pdf_name}' — {len(text_elements)} text blocks + {len(captions)} image captions"
 
 
 def ask_question(question: str) -> str:
-    """
-    Retrieve top-k chunks from FAISS and call chat_completions endpoint.
-    """
+    """Retrieve from FAISS and call chat completion."""
     global retriever
     if retriever is None:
-        return "❌ Please upload and process a PDF first."
+        return "❌ Please process a PDF first."
     if not question.strip():
         return "❌ Please enter a question."
 
     docs = retriever.get_relevant_documents(question)
     context = "\n\n".join(doc.page_content for doc in docs)
-
     prompt = (
-        "Use the following document excerpts to answer the question.\n\n"
-        f"{context}\n\n"
-        f"Question: {question}\n"
-        "Answer:"
+        "Use the following excerpts to answer the question:\n\n"
+        f"{context}\n\nQuestion: {question}\nAnswer:"
    )
    response = hf.chat_completion(
        model="google/gemma-3-27b-it",
-        messages=[{"role": "user", "content": prompt}],
+        messages=[{"role":"user","content":prompt}],
        max_tokens=128,
        temperature=0.5,
    )
@@ -181,7 +121,7 @@ def ask_question(question: str) -> str:
 
 
 def clear_interface():
-    """Reset state and clear extracted images."""
+    """Reset all state and clear extracted images."""
     global retriever, current_pdf_name, combined_texts
     retriever = None
     current_pdf_name = None
@@ -190,45 +130,24 @@ def clear_interface():
     os.makedirs(FIGURES_DIR, exist_ok=True)
     return ""
 
-# ── Gradio UI ────────────────────────────────────────────────────────────────
-theme = gr.themes.Soft(primary_hue="indigo", secondary_hue="blue")
-with gr.Blocks(theme=theme, css="""
-    .container { border-radius: 10px; padding: 15px; }
-    .pdf-active { border-left: 3px solid #6366f1;
-                  padding-left: 10px;
-                  background-color: rgba(99,102,241,0.1); }
-    .footer { text-align: center; margin-top: 30px;
-              font-size: 0.8em; color: #666; }
-    .main-title { text-align: center; font-size: 64px;
-                  font-weight: bold; margin-bottom: 20px; }
-""") as demo:
-    gr.Markdown("<div class='main-title'>DocQueryAI (Remote-RAG)</div>")
-
+# Gradio UI
+with gr.Blocks(theme=gr.themes.Soft(primary_hue="indigo", secondary_hue="blue")) as demo:
+    gr.Markdown("# DocQueryAI (Remote-RAG)")
     with gr.Row():
         with gr.Column():
-            gr.Markdown("## 📄 Document Input")
-            pdf_file = gr.File(label="Upload PDF", file_types=[".pdf"], type="filepath")
-            process_btn = gr.Button("📀 Process Document", variant="primary")
-            status_box = gr.Textbox(label="Status", interactive=False)
-
+            pdf_file = gr.File(file_types=[".pdf"], type="filepath")
+            process_btn = gr.Button("Process PDF")
+            status_box = gr.Textbox(interactive=False)
         with gr.Column():
-            gr.Markdown("## ❓ Ask Questions")
-            question_input = gr.Textbox(lines=3,
-                                        placeholder="Enter your question here…")
-            ask_btn = gr.Button("🔍 Ask Question", variant="primary")
-            answer_output = gr.Textbox(label="Answer", lines=8, interactive=False)
-
-    clear_btn = gr.Button("🗑️ Clear All", variant="secondary")
-    gr.Markdown("<div class='footer'>Powered by HF Inference + BLIP + FAISS | Gradio</div>")
-
-    process_btn.click(fn=process_pdf,
-                      inputs=[pdf_file],
-                      outputs=[status_box])
-    ask_btn.click(fn=ask_question,
-                  inputs=[question_input],
-                  outputs=[answer_output])
-    clear_btn.click(fn=clear_interface,
-                    outputs=[status_box, answer_output])
+            question_input = gr.Textbox(lines=3)
+            ask_btn = gr.Button("Ask")
+            answer_output = gr.Textbox(interactive=False)
+    clear_btn = gr.Button("Clear All")
+
+    process_btn.click(fn=process_pdf, inputs=[pdf_file], outputs=[status_box])
+    ask_btn.click(fn=ask_question, inputs=[question_input], outputs=[answer_output])
+    clear_btn.click(fn=clear_interface, outputs=[status_box, answer_output])
 
 if __name__ == "__main__":
-    demo.launch(debug=True, share=True)
+    demo.launch()
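
For readers tracing the retrieval path this commit settles on (remote embeddings via InferenceClient, a FAISS store, a Gemma chat completion), the sketch below shows how those pieces can fit together. It is an illustration, not part of the commit: it uses feature_extraction for embeddings, which may differ from the hf.embeddings(...) call in the diff depending on the installed huggingface_hub version; recent langchain_community releases expect FAISS.from_embeddings to receive (text, vector) pairs plus an Embeddings object for query time; RemoteEmbeddings, EMBED_MODEL, and the sample chunks are hypothetical stand-ins; and whether the Gemma embedding model is reachable over the serverless API is not verified here.

# Minimal sketch, assuming huggingface_hub with InferenceClient, langchain_community, and faiss-cpu installed.
from typing import List

from huggingface_hub import InferenceClient
from langchain_core.embeddings import Embeddings
from langchain_community.vectorstores import FAISS

EMBED_MODEL = "google/Gemma-Embeddings-v1.0"  # embedding model named in the commit
CHAT_MODEL = "google/gemma-3-27b-it"          # chat model named in the commit

hf = InferenceClient()  # picks up HUGGINGFACEHUB_API_TOKEN from the environment


class RemoteEmbeddings(Embeddings):
    """Hypothetical adapter so the FAISS store can embed queries with the same remote model."""

    def embed_query(self, text: str) -> List[float]:
        # feature_extraction returns one vector per input for sentence-embedding endpoints
        return [float(x) for x in hf.feature_extraction(text, model=EMBED_MODEL)]

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        return [self.embed_query(t) for t in texts]


emb = RemoteEmbeddings()
chunks = ["First text block from the PDF", "Caption of the first extracted figure"]

# from_embeddings takes precomputed (text, vector) pairs and keeps `emb` for query time
index = FAISS.from_embeddings(
    text_embeddings=list(zip(chunks, emb.embed_documents(chunks))),
    embedding=emb,
)
retriever = index.as_retriever(search_kwargs={"k": 2})

question = "What is the document about?"
context = "\n\n".join(d.page_content for d in retriever.get_relevant_documents(question))
response = hf.chat_completion(
    model=CHAT_MODEL,
    messages=[{"role": "user", "content": f"{context}\n\nQuestion: {question}\nAnswer:"}],
    max_tokens=128,
    temperature=0.5,
)
print(response.choices[0].message.content)

If the Space pins a client that does expose an embeddings helper matching the diff, only the body of embed_query in the adapter would need to change; the indexing and question-answering flow stays the same.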