Muzamil305 committed on
Commit 6eba85c · verified · 1 Parent(s): 7e41ac9

Update app.py

Files changed (1)
  1. app.py +652 -216
app.py CHANGED
@@ -1,221 +1,657 @@
  import os
- import PyPDF2
  import gradio as gr
-
- # Import vectorstore and embeddings from langchain community package
- from langchain_community.vectorstores import FAISS
- from langchain_community.embeddings import HuggingFaceEmbeddings
- # Text splitter to break large documents into manageable chunks
- from langchain.text_splitter import CharacterTextSplitter
- # HF Inference client for running Mistral-7B chat completions
  from huggingface_hub import InferenceClient
-
- # ── Globals ───────────────────────────────────────────────────────────────────
- index = None  # FAISS index storing document embeddings
- retriever = None  # Retriever to fetch relevant chunks
- current_pdf_name = None  # Name of the currently loaded PDF
- pdf_text = None  # Full text of the uploaded PDF
-
- # ── HF Inference client (token injected via Spaces secrets) ─────────────────────
- # Instantiate client for conversational endpoint (Mistral-7B-Instruct)
- client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.3")
-
- # ── Embeddings ───────────────────────────────────────────────────────────────
- # Use BGE embeddings from BAAI for vectorizing text chunks
- embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-base-en-v1.5")
-
- def process_pdf(pdf_file):
-     """
-     1. Reads and extracts text from each page of the uploaded PDF.
-     2. Splits the combined text into overlapping chunks for retrieval.
-     3. Builds a FAISS index over those chunks and initializes a retriever.
-     Args:
-         pdf_file: Filepath to the uploaded PDF.
-     Returns:
-         - PDF filename shown in UI
-         - Status message with number of chunks
-         - Enables the question input field
-     """
-     global current_pdf_name, index, retriever, pdf_text
-
-     # If no file uploaded, prompt the user
      if pdf_file is None:
-         return None, "❌ Please upload a PDF file.", gr.update(interactive=False)
-
-     # Save current filename for display and context
-     current_pdf_name = os.path.basename(pdf_file.name)
-
-     # Extract text from all pages
-     with open(pdf_file.name, "rb") as f:
-         reader = PyPDF2.PdfReader(f)
-         pages = [page.extract_text() or "" for page in reader.pages]
-     pdf_text = "\n\n".join(pages)  # Combine page texts
-
-     # Break text into 1,000-character chunks with 100-char overlap
-     splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
-     chunks = splitter.split_text(pdf_text)
-
-     # Build and store FAISS index for similarity search
-     index = FAISS.from_texts(chunks, embeddings)
-
-     # Create retriever configured to return top-2 most relevant chunks
-     retriever = index.as_retriever(search_kwargs={"k": 2})
-
-     # Return filename, success status, and enable the question box
-     status = f"✅ Indexed '{current_pdf_name}' - {len(chunks)} chunks"
-     return current_pdf_name, status, gr.update(interactive=True)
-
-
- def ask_question(pdf_name, question):
-     """
-     1. Retrieves the top-k most relevant text chunks from the FAISS index.
-     2. Constructs a prompt combining those excerpts with the user question.
-     3. Calls the HF chat endpoint to generate an answer.
-     Args:
-         pdf_name: The displayed PDF filename (unused internally).
-         question: The user's question about the document.
-     Returns:
-         The generated answer as a string.
-     """
-     global retriever
-
-     # Ensure a PDF is loaded first
-     if index is None or retriever is None:
-         return "❌ Please upload and index a PDF first."
-     # Prompt user to type something if empty
-     if not question.strip():
-         return "❌ Please enter a question."
-
-     # Fetch relevant document chunks
-     docs = retriever.get_relevant_documents(question)
-     context = "\n\n".join(doc.page_content for doc in docs)
-
-     # Prepare the conversational prompt
-     prompt = (
-         "Use the following document excerpts to answer the question.\n\n"
-         f"{context}\n\n"
-         f"Question: {question}\n"
-         "Answer:"
-     )
-
-     # Run chat completion with the prompt as the user's message
-     response = client.chat_completion(
-         messages=[{"role": "user", "content": prompt}],
-         max_tokens=128,
-         temperature=0.5
-     )
-
-     # Parse assistant reply from the choices
-     answer = response["choices"][0]["message"]["content"].strip()
-     return answer
-
-
- def generate_summary():
-     """
-     Uses the first 2,000 characters of the loaded PDF text to ask the model for a concise summary.
-     """
-     if not pdf_text:
-         return "❌ Please upload and index a PDF first."
-
-     # Shorten long docs to 2k chars for summarization
-     prompt = (
-         "Please provide a concise summary of the following document:\n\n"
-         f"{pdf_text[:2000]}..."
-     )
-     response = client.chat_completion(
-         messages=[{"role": "user", "content": prompt}],
-         max_tokens=150,
-         temperature=0.5
-     )
-     return response["choices"][0]["message"]["content"].strip()
-
-
- def extract_keywords():
-     """
-     Uses the first 2,000 characters to ask the model to extract key terms or concepts.
-     """
-     if not pdf_text:
-         return "❌ Please upload and index a PDF first."
-
-     prompt = (
-         "Extract 10–15 key terms or concepts from the following document:\n\n"
-         f"{pdf_text[:2000]}..."
-     )
-     response = client.chat_completion(
-         messages=[{"role": "user", "content": prompt}],
-         max_tokens=60,
-         temperature=0.5
      )
-     return response["choices"][0]["message"]["content"].strip()
-
-
- def clear_interface():
-     """
-     Resets all global state back to None, and clears inputs in the UI.
-     """
-     global index, retriever, current_pdf_name, pdf_text
-     index = retriever = None
-     current_pdf_name = pdf_text = None
-     # Clear displayed filename and re-disable question input
-     return None, "", gr.update(interactive=False)
-
- # ── Gradio UI ────────────────────────────────────────────────────────────────
- theme = gr.themes.Soft(primary_hue="indigo", secondary_hue="blue")
-
- with gr.Blocks(theme=theme, css="""
- .container { border-radius: 10px; padding: 15px; }
- .pdf-active { border-left: 3px solid #6366f1; padding-left: 10px; background-color: rgba(99,102,241,0.1); }
- .footer { text-align: center; margin-top: 30px; font-size: 0.8em; color: #666; }
- /* Center and enlarge the main heading */
- .main-title {
-     text-align: center;
-     font-size: 64px;
-     font-weight: bold;
-     margin-bottom: 20px;
- }
- """) as demo:
-     # Application title centered and bold
-     gr.Markdown("<div class='main-title'>DocQueryAI</div>")
-
-     with gr.Row():
-         with gr.Column():
-             gr.Markdown("## 📄 Document Input")
-             # Display the name of the active PDF
-             pdf_display = gr.Textbox(label="Active Document", interactive=False, elem_classes="pdf-active")
-             # File upload widget for PDFs
-             pdf_file = gr.File(file_types=[".pdf"], type="filepath")
-             # Button to start processing
-             upload_button = gr.Button("📤 Process Document", variant="primary")
-             # Status text below the button
-             status_box = gr.Textbox(label="Status", interactive=False)
-
-         with gr.Column():
-             gr.Markdown("## ❓ Ask Questions")
-             # Text area for user questions
-             question_input = gr.Textbox(lines=3, placeholder="Enter your question here…")
-             # Button to trigger Q&A
-             ask_button = gr.Button("🔍 Ask Question", variant="primary")
-             # Output textbox for the generated answer
-             answer_output = gr.Textbox(label="Answer", lines=8, interactive=False)
-
-     # Footer section with summary and keywords extraction
-     with gr.Row():
-         summary_button = gr.Button("📋 Generate Summary", variant="secondary")
-         summary_output = gr.Textbox(label="Summary", lines=4, interactive=False)
-         keywords_button = gr.Button("🏷️ Extract Keywords", variant="secondary")
-         keywords_output = gr.Textbox(label="Keywords", lines=4, interactive=False)
-
-     # Clear everything
-     clear_button = gr.Button("🗑️ Clear All", variant="secondary")
-     gr.Markdown("<div class='footer'>Powered by LangChain + Mistral 7B + FAISS | Gradio</div>")
-
-     # Bind events to functions
-     upload_button.click(process_pdf, [pdf_file], [pdf_display, status_box, question_input])
-     ask_button.click(ask_question, [pdf_display, question_input], answer_output)
-     summary_button.click(generate_summary, [], summary_output)
-     keywords_button.click(extract_keywords, [], keywords_output)
-     clear_button.click(clear_interface, [], [pdf_file, pdf_display, question_input])
-
- if __name__ == "__main__":
-     # Launch the Gradio app, share=True exposes a public URL in Spaces
-     demo.launch(debug=True, share=True)
  import os
  import gradio as gr
+ import tempfile
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_core.vectorstores import InMemoryVectorStore
+ from langchain_huggingface import HuggingFaceEmbeddings
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
+ from unstructured.partition.pdf import partition_pdf
+ from unstructured.partition.utils.constants import PartitionStrategy
  from huggingface_hub import InferenceClient
+ import base64
+ from PIL import Image
+ import io
+ import requests
+ from getpass import getpass
+ import PyPDF2
+ import fitz  # PyMuPDF
+ import pytesseract
+
+ # # Step 2: Set up Hugging Face Token
+ # print("🔑 Setting up Hugging Face Token...")
+ # print("Please enter your Hugging Face token (get it from: https://huggingface.co/settings/tokens)")
+ # HF_TOKEN = getpass("Hugging Face Token: ")
+
+ # # Set environment variable
+ # os.environ["HUGGINGFACE_HUB_TOKEN"] = HF_TOKEN
+
+ # The interactive prompt above is disabled, so read the token from the environment
+ # (e.g. a Spaces secret) instead; it may be None, which works for public models.
+ HF_TOKEN = os.environ.get("HUGGINGFACE_HUB_TOKEN") or os.environ.get("HF_TOKEN")
+
+ # Step 3: Initialize Hugging Face components
+ print("🚀 Initializing models...")
+
+ # Initialize embeddings model (runs locally for better performance)
+ embeddings = HuggingFaceEmbeddings(
+     model_name="sentence-transformers/all-MiniLM-L6-v2",
+     model_kwargs={'device': 'cpu'}
+ )
+
+ # Initialize vector store
+ vector_store = InMemoryVectorStore(embeddings)
+
+ # Initialize Hugging Face Inference clients with proper multimodal support
+ def initialize_multimodal_clients():
+     """Initialize clients with proper multimodal capabilities"""
+
+     # Vision-Language Models (can understand images AND text together)
+     multimodal_models = [
+         "microsoft/git-large-coco",  # Best for image+text understanding
+         "Salesforce/blip2-opt-2.7b",  # Strong multimodal model
+         "microsoft/git-base-coco",  # Lighter alternative
+         "Salesforce/blip-image-captioning-large"  # Good image understanding
+     ]
+
+     # Text-only models for when no images are involved
+     text_models = [
+         "google/flan-t5-base",  # Excellent for Q&A
+         "microsoft/DialoGPT-medium",  # Conversational
+         "facebook/blenderbot-400M-distill",  # Another option
+     ]
+
+     vision_client = None
+     text_client = None
+
+     # Try to initialize multimodal/vision client
+     for model_name in multimodal_models:
+         try:
+             vision_client = InferenceClient(model=model_name, token=HF_TOKEN)
+             print(f"✅ Multimodal client initialized: {model_name}")
+             break
+         except Exception as e:
+             print(f"⚠️ Failed to initialize {model_name}: {e}")
+             continue
+
+     # Try to initialize text client
+     for model_name in text_models:
+         try:
+             text_client = InferenceClient(model=model_name, token=HF_TOKEN)
+             print(f"✅ Text client initialized: {model_name}")
+             break
+         except Exception as e:
+             print(f"⚠️ Failed to initialize {model_name}: {e}")
+             continue
+
+     return vision_client, text_client
+
+ vision_client, text_client = initialize_multimodal_clients()
+
+ template = """
+ You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.
+ Question: {question}
+ Context: {context}
+ Answer:
+ """
+
+ def extract_text_with_multiple_methods(pdf_path):
+     """Try multiple methods to extract text from PDF"""
+     extracted_text = ""
+     methods_tried = []
+
+     # Method 1: PyPDF2
+     try:
+         print("🔍 Trying PyPDF2...")
+         with open(pdf_path, 'rb') as file:
+             pdf_reader = PyPDF2.PdfReader(file)
+             text_parts = []
+             for page_num, page in enumerate(pdf_reader.pages):
+                 page_text = page.extract_text()
+                 if page_text.strip():
+                     text_parts.append(f"Page {page_num + 1}:\n{page_text}")
+
+             if text_parts:
+                 extracted_text = "\n\n".join(text_parts)
+                 methods_tried.append("PyPDF2")
+                 print(f"✅ PyPDF2 extracted {len(extracted_text)} characters")
+     except Exception as e:
+         print(f"⚠️ PyPDF2 failed: {e}")
+
+     # Method 2: PyMuPDF (fitz) - often better for complex PDFs
+     if not extracted_text.strip():
+         try:
+             print("🔍 Trying PyMuPDF...")
+             doc = fitz.open(pdf_path)
+             text_parts = []
+             for page_num in range(len(doc)):
+                 page = doc.load_page(page_num)
+                 page_text = page.get_text()
+                 if page_text.strip():
+                     text_parts.append(f"Page {page_num + 1}:\n{page_text}")
+
+             if text_parts:
+                 extracted_text = "\n\n".join(text_parts)
+                 methods_tried.append("PyMuPDF")
+                 print(f"✅ PyMuPDF extracted {len(extracted_text)} characters")
+             doc.close()
+         except Exception as e:
+             print(f"⚠️ PyMuPDF failed: {e}")
+
+     # Method 3: OCR with PyMuPDF for image-based PDFs
+     if not extracted_text.strip():
+         try:
+             print("🔍 Trying OCR with PyMuPDF...")
+             doc = fitz.open(pdf_path)
+             text_parts = []
+             for page_num in range(min(len(doc), 5)):  # Limit to first 5 pages for OCR
+                 page = doc.load_page(page_num)
+                 # Convert page to image
+                 pix = page.get_pixmap()
+                 img_data = pix.tobytes("png")
+                 img = Image.open(io.BytesIO(img_data))
+
+                 # Apply OCR
+                 ocr_text = pytesseract.image_to_string(img)
+                 if ocr_text.strip():
+                     text_parts.append(f"Page {page_num + 1} (OCR):\n{ocr_text}")
+
+             if text_parts:
+                 extracted_text = "\n\n".join(text_parts)
+                 methods_tried.append("OCR")
+                 print(f"✅ OCR extracted {len(extracted_text)} characters")
+             doc.close()
+         except Exception as e:
+             print(f"⚠️ OCR failed: {e}")
+
+     return extracted_text, methods_tried
+
+ def upload_and_process_pdf(pdf_file):
+     """Process uploaded PDF file with enhanced error handling"""
      if pdf_file is None:
+         return "Please upload a PDF file first."
+
+     try:
+         # Create temporary directories
+         with tempfile.TemporaryDirectory() as temp_dir:
+             figures_dir = os.path.join(temp_dir, "figures")
+             os.makedirs(figures_dir, exist_ok=True)
+
+             # Save uploaded file temporarily
+             temp_pdf_path = os.path.join(temp_dir, "uploaded.pdf")
+             with open(temp_pdf_path, "wb") as f:
+                 f.write(pdf_file)
+
+             # Check file size and validity
+             file_size = os.path.getsize(temp_pdf_path)
+             print(f"📄 Processing PDF: {file_size} bytes")
+
+             if file_size == 0:
+                 return "❌ The uploaded file is empty. Please check your PDF file."
+
+             if file_size > 50 * 1024 * 1024:  # 50MB limit
+                 return "❌ File too large (>50MB). Please upload a smaller PDF."
+
+             # Try multiple extraction methods
+             text, methods = extract_text_with_multiple_methods(temp_pdf_path)
+
+             # Process with unstructured as backup/additional method
+             unstructured_text = ""
+             try:
+                 print("🔍 Trying unstructured...")
+                 elements = partition_pdf(
+                     temp_pdf_path,
+                     strategy=PartitionStrategy.FAST,
+                     extract_image_block_types=["Image", "Table"],
+                     extract_image_block_output_dir=figures_dir,
+                     infer_table_structure=True
+                 )
+
+                 # Extract text elements
+                 text_elements = []
+                 for element in elements:
+                     if hasattr(element, 'text') and element.text and element.category not in ["Image", "Table"]:
+                         text_elements.append(element.text)
+
+                 if text_elements:
+                     unstructured_text = "\n\n".join(text_elements)
+                     print(f"✅ Unstructured extracted {len(unstructured_text)} characters")
+
+                     # Combine with existing text if available
+                     if text.strip():
+                         text = f"{text}\n\n--- Additional Content ---\n\n{unstructured_text}"
+                     else:
+                         text = unstructured_text
+                     methods.append("unstructured")
+
+             except Exception as unstructured_error:
+                 print(f"⚠️ Unstructured processing failed: {unstructured_error}")
+
+             # Process images
+             image_text = ""
+             image_count = 0
+             if os.path.exists(figures_dir):
+                 for file in os.listdir(figures_dir):
+                     if file.lower().endswith(('.png', '.jpg', '.jpeg')):
+                         try:
+                             extracted_image_text = extract_text_from_image(os.path.join(figures_dir, file))
+                             image_text += f"\n\n{extracted_image_text}"
+                             image_count += 1
+                         except Exception as e:
+                             print(f"⚠️ Error processing image {file}: {e}")
+
+             # Also try to extract images directly from PDF using PyMuPDF
+             try:
+                 doc = fitz.open(temp_pdf_path)
+                 for page_num in range(min(len(doc), 10)):  # Process first 10 pages
+                     page = doc.load_page(page_num)
+                     image_list = page.get_images(full=True)
+
+                     for img_index, img in enumerate(image_list[:3]):  # Max 3 images per page
+                         try:
+                             xref = img[0]
+                             pix = fitz.Pixmap(doc, xref)
+                             if pix.n - pix.alpha < 4:  # GRAY or RGB
+                                 img_data = pix.tobytes("png")
+                                 img_path = os.path.join(figures_dir, f"page_{page_num}_img_{img_index}.png")
+                                 with open(img_path, "wb") as img_file:
+                                     img_file.write(img_data)
+
+                                 extracted_image_text = extract_text_from_image(img_path)
+                                 image_text += f"\n\n{extracted_image_text}"
+                                 image_count += 1
+                             pix = None
+                         except Exception as img_error:
+                             print(f"⚠️ Error extracting image: {img_error}")
+                             continue
+                 doc.close()
+             except Exception as e:
+                 print(f"⚠️ Error extracting images from PDF: {e}")
+
+             # Combine all text
+             full_text = text
+             if image_text.strip():
+                 full_text += f"\n\n--- Image Content ---\n{image_text}"
+
+             if not full_text.strip():
+                 return (f"⚠️ No text could be extracted from the PDF using any method. "
+                         f"This might be a scanned PDF without OCR text, or the file might be corrupted. "
+                         f"Methods tried: {', '.join(['PyPDF2', 'PyMuPDF', 'OCR', 'unstructured']) if not methods else ', '.join(methods)}")
+
+             # Split and index the text
+             chunked_texts = split_text(full_text)
+
+             if not chunked_texts:
+                 return "⚠️ Text was extracted but could not be split into chunks."
+
+             # Clear existing vector store and add new documents
+             global vector_store
+             vector_store = InMemoryVectorStore(embeddings)
+             index_docs(chunked_texts)
+
+             success_msg = (f"✅ PDF processed successfully!\n"
+                            f"📊 Statistics:\n"
+                            f"- Text chunks: {len(chunked_texts)}\n"
+                            f"- Images processed: {image_count}\n"
+                            f"- Methods used: {', '.join(methods)}\n"
+                            f"- Total characters: {len(full_text)}")
+
+             return success_msg
+
+     except Exception as e:
+         return f"❌ Error processing PDF: {str(e)}\n\nTroubleshooting tips:\n- Ensure the PDF is not password protected\n- Try a different PDF file\n- Check if the file is corrupted"
+
+ def load_pdf(file_path, figures_directory):
+     """Legacy function - now handled by upload_and_process_pdf"""
+     return extract_text_with_multiple_methods(file_path)[0]
+
+ def extract_text_from_image(image_path):
+     """Extract text description from image using Hugging Face Vision model"""
+     try:
+         # First try OCR for any text in the image
+         ocr_text = ""
+         try:
+             img = Image.open(image_path)
+             ocr_text = pytesseract.image_to_string(img)
+             if ocr_text.strip():
+                 ocr_text = f"Text in image: {ocr_text.strip()}"
+         except Exception as ocr_error:
+             print(f"⚠️ OCR failed for image: {ocr_error}")
+
+         # Then use vision model for description
+         vision_description = ""
+         if vision_client:
+             try:
+                 with open(image_path, "rb") as img_file:
+                     image_data = img_file.read()
+
+                 response = vision_client.image_to_text(image_data)
+
+                 if isinstance(response, list) and len(response) > 0:
+                     vision_description = response[0].get('generated_text', '')
+                 elif isinstance(response, dict):
+                     vision_description = response.get('generated_text', '')
+                 else:
+                     vision_description = str(response)
+
+             except Exception as vision_error:
+                 print(f"⚠️ Vision model failed: {vision_error}")
+
+         # Combine OCR and vision results
+         combined_result = []
+         if ocr_text:
+             combined_result.append(ocr_text)
+         if vision_description:
+             combined_result.append(f"Image description: {vision_description}")
+
+         if combined_result:
+             return "\n".join(combined_result)
+         else:
+             return "Image content: Visual element present but could not be processed"
+
+     except Exception as e:
+         print(f"⚠️ Error extracting text from image: {e}")
+         return "Image content: Visual element present but could not be processed"
+
+ def split_text(text):
+     """Split text into chunks"""
+     if not text or not text.strip():
+         return []
+
+     text_splitter = RecursiveCharacterTextSplitter(
+         chunk_size=1000,
+         chunk_overlap=200,
+         add_start_index=True
      )
+     return text_splitter.split_text(text)
+
+ def index_docs(texts):
+     """Index documents in vector store"""
+     if texts:
+         vector_store.add_texts(texts)
+         print(f"📚 Indexed {len(texts)} text chunks")
+
+ def retrieve_docs(query, k=4):
+     """Retrieve relevant documents"""
+     try:
+         return vector_store.similarity_search(query, k=k)
+     except Exception as e:
+         print(f"⚠️ Error retrieving documents: {e}")
+         return []
+
+ def answer_question_hf(question):
+     """Answer question using Hugging Face multimodal models"""
+     try:
+         # Retrieve relevant documents
+         related_documents = retrieve_docs(question)
+
+         if not related_documents:
+             return "❓ No relevant documents found. Please upload and process a PDF first."
+
+         # Prepare context
+         context = "\n\n".join([doc.page_content for doc in related_documents])
+
+         # Limit context length for better performance
+         if len(context) > 1500:
+             context = context[:1500] + "..."
+
+         # Check if we have image content in the context
+         has_image_content = "Image content:" in context or "Image description:" in context
+
+         if has_image_content and vision_client:
+             # Use multimodal approach for questions involving images
+             try:
+                 # For multimodal models, we can send both text and image context
+                 multimodal_prompt = f"""
+ Based on the document content below (including text and image descriptions), answer this question: {question}
+
+ Document content:
+ {context}
+
+ Please provide a clear, concise answer in 2-3 sentences.
+ """
+
+                 response = vision_client.text_generation(
+                     multimodal_prompt,
+                     max_new_tokens=150,
+                     temperature=0.7,
+                     do_sample=True,
+                     return_full_text=False,
+                     stop=["Question:", "Document content:", "\n\n\n"]
+                 )
+
+                 if isinstance(response, dict):
+                     answer = response.get('generated_text', '')
+                 elif isinstance(response, str):
+                     answer = response
+                 else:
+                     answer = str(response)
+
+                 if answer.strip():
+                     return f"🖼️ {answer.strip()}"
+
+             except Exception as multimodal_error:
+                 print(f"⚠️ Multimodal model failed: {multimodal_error}")
+
+         # Fall back to text-only approach
+         if text_client:
+             try:
+                 text_prompt = f"""
+ Question: {question}
+
+ Based on the following information from the document, provide a clear and concise answer:
+
+ {context}
+
+ Answer:"""
+
+                 response = text_client.text_generation(
+                     text_prompt,
+                     max_new_tokens=150,
+                     temperature=0.7,
+                     do_sample=True,
+                     return_full_text=False,
+                     stop=["Question:", "Answer:", "\n\n\n"]
+                 )
+
+                 if isinstance(response, dict):
+                     answer = response.get('generated_text', '')
+                 elif isinstance(response, str):
+                     answer = response
+                 else:
+                     answer = str(response)
+
+                 # Clean up the answer
+                 answer = answer.strip()
+                 if answer:
+                     return f"📄 {answer}"
+
+             except Exception as text_error:
+                 print(f"⚠️ Text model failed: {text_error}")
+
+         # Last resort: Return extracted context
+         if context:
+             return f"📋 Based on the document, here's the relevant information:\n\n{context[:500]}{'...' if len(context) > 500 else ''}"
+         else:
+             return "❌ Unable to find relevant information in the document."
+
+     except Exception as e:
+         return f"❌ Error generating answer: {str(e)}"
+
+ def create_colab_interface():
+     """Create Gradio interface optimized for Colab"""
+
+     with gr.Blocks(
+         title="Enhanced Multimodal RAG with Hugging Face",
+         theme=gr.themes.Soft(),
+         css="""
+         .gradio-container {
+             max-width: 1200px !important;
+         }
+         """
+     ) as iface:
+
+         gr.HTML("""
+         <div style="text-align: center; padding: 20px;">
+             <h1>📚 Enhanced Multimodal RAG with Hugging Face</h1>
+             <p>Upload a PDF document and ask questions about its content, including images and tables!</p>
+             <p><em>Now with improved PDF processing and multiple extraction methods</em></p>
+         </div>
+         """)
+
+         with gr.Row():
+             with gr.Column(scale=1):
+                 # PDF Upload Section
+                 gr.Markdown("### 📤 Upload Document")
+                 pdf_input = gr.File(
+                     label="Upload PDF Document",
+                     file_types=[".pdf"],
+                     type="binary",
+                     height=100
+                 )
+
+                 upload_btn = gr.Button("🔄 Process PDF", variant="primary", size="lg")
+                 upload_status = gr.Textbox(
+                     label="Processing Status",
+                     interactive=False,
+                     lines=6,
+                     placeholder="Upload a PDF and click 'Process PDF' to begin..."
+                 )
+
+             with gr.Column(scale=2):
+                 # Chat Interface
+                 gr.Markdown("### 💬 Chat Interface")
+                 chatbot = gr.Chatbot(
+                     label="Chat with your document",
+                     height=400,
+                     show_label=False
+                 )
+
+                 with gr.Row():
+                     question_input = gr.Textbox(
+                         label="Ask a question",
+                         placeholder="What is this document about?",
+                         lines=1,
+                         scale=4
+                     )
+                     ask_btn = gr.Button("Ask", variant="secondary", scale=1)
+
+         # Example questions
+         gr.Markdown("### 💡 Example Questions")
+         example_questions = [
+             "What is the main topic of this document?",
+             "Can you summarize the key points?",
+             "What information is shown in the images or tables?",
+             "What are the conclusions or recommendations?"
+         ]
+
+         with gr.Row():
+             for i, eq in enumerate(example_questions):
+                 example_btn = gr.Button(eq, size="sm")
+                 example_btn.click(
+                     lambda x=eq: x,
+                     outputs=[question_input]
+                 )
+
+         # Event handlers
+         def process_pdf_and_update(pdf_file):
+             if pdf_file is None:
+                 return "Please select a PDF file first."
+             return upload_and_process_pdf(pdf_file)
+
+         def ask_and_update_chat(question, chat_history):
+             if not question.strip():
+                 return chat_history, ""
+
+             # Get answer
+             answer = answer_question_hf(question)
+
+             # Update chat history
+             if chat_history is None:
+                 chat_history = []
+
+             chat_history.append([question, answer])
+
+             return chat_history, ""
+
+         def clear_chat():
+             return []
+
+         # Connect events
+         upload_btn.click(
+             fn=process_pdf_and_update,
+             inputs=[pdf_input],
+             outputs=[upload_status]
+         )
+
+         ask_btn.click(
+             fn=ask_and_update_chat,
+             inputs=[question_input, chatbot],
+             outputs=[chatbot, question_input]
+         )
+
+         question_input.submit(
+             fn=ask_and_update_chat,
+             inputs=[question_input, chatbot],
+             outputs=[chatbot, question_input]
+         )
+
+         # Clear chat button
+         clear_btn = gr.Button("🗑️ Clear Chat", variant="stop", size="sm")
+         clear_btn.click(
+             fn=clear_chat,
+             outputs=[chatbot]
+         )
+
+         # Enhanced Instructions
+         gr.Markdown("""
+ ---
+ ### 📋 Instructions:
+ 1. **Get HF Token**: Visit [Hugging Face Settings](https://huggingface.co/settings/tokens) to get your token
+ 2. **Upload PDF**: Click "Choose File" and select your PDF document
+ 3. **Process Document**: Click "Process PDF" and wait for confirmation
+ 4. **Ask Questions**: Type questions or use example prompts
+
+ ### ✨ Enhanced Features:
+ - 📄 **Multiple Text Extraction Methods**: PyPDF2, PyMuPDF, OCR, and Unstructured
+ - 🖼️ **Advanced Image Processing**: Direct PDF image extraction + vision models
+ - 🔍 **Robust PDF Handling**: Works with scanned PDFs, complex layouts, and image-heavy documents
+ - 💬 **Interactive Chat**: Conversation history with multimodal understanding
+ - ⚡ **Error Recovery**: Graceful fallbacks when one extraction method fails
+ - 📊 **Processing Statistics**: Detailed feedback on what was extracted
+
+ ### 🔧 Models Used:
+ - **🎭 Multimodal**: Microsoft GIT-Large (understands images + text together)
+ - **📝 Text Generation**: Google FLAN-T5-Base (optimized for Q&A)
+ - **👁️ Vision**: Salesforce BLIP (image captioning and understanding)
+ - **🔍 Embeddings**: Sentence Transformers all-MiniLM-L6-v2
+ - **📖 OCR**: Tesseract for text recognition in images
+
+ ### 🎯 Multimodal Capabilities:
+ - **Text + Images**: Can answer questions about both text content and visual elements
+ - **Image Understanding**: Describes charts, diagrams, photos in your PDFs
+ - **OCR Integration**: Extracts text from images within PDFs
+ - **Context Awareness**: Combines text and visual information for comprehensive answers
+ - **Fallback Strategy**: Uses multiple methods to ensure successful text extraction
+
+ ### 🛠️ Troubleshooting:
+ - **No text extracted**: Try different PDF files, ensure not password-protected
+ - **Large files**: Keep PDFs under 50MB for optimal performance
+ - **Scanned PDFs**: OCR will automatically process image-based text
+ - **Complex layouts**: Multiple extraction methods handle various PDF formats
+         """)
+
+     return iface
+
+ # Step 4: Launch the application
+ print("✅ Setup complete! Launching Enhanced Gradio interface...")
+
+ # Create and launch interface
+ iface = create_colab_interface()
+
+ # Launch with public link for Colab
+ iface.launch(
+     debug=True,
+     share=True,  # Creates public link
+     server_name="0.0.0.0",
+     server_port=7860,
+     show_error=True
+ )
+
+ print("🎉 Enhanced Application launched successfully!")
+ print("📱 Use the public link above to access your app from anywhere!")