from typing import List, Dict
import pypdf
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
class PDFProcessor:
    def __init__(self):
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200,
            length_function=len,
            separators=["\n\n", "\n", " ", ""]
        )

    def process_pdf(self, pdf_path: str) -> List[Dict]:
        """
        Process a PDF file and return chunks of text with metadata.

        Args:
            pdf_path (str): Path to the PDF file

        Returns:
            List[Dict]: List of text chunks with metadata
        """
        try:
            # Try using PyPDFLoader from langchain
            loader = PyPDFLoader(pdf_path)
            pages = loader.load()

            # Split the text into chunks
            chunks = []
            for page in pages:
                page_chunks = self.text_splitter.split_text(page.page_content)
                for chunk in page_chunks:
                    chunks.append({
                        'text': chunk,
                        'metadata': {'page': page.metadata['page']}
                    })
            return chunks
        except Exception as e:
            print(f"Error with PyPDFLoader: {str(e)}")
            print("Trying alternative PDF processing method...")

            # Fallback to direct pypdf usage
            try:
                with open(pdf_path, 'rb') as file:
                    pdf = pypdf.PdfReader(file)
                    chunks = []
                    for page_num in range(len(pdf.pages)):
                        text = pdf.pages[page_num].extract_text()
                        page_chunks = self.text_splitter.split_text(text)
                        for chunk in page_chunks:
                            chunks.append({
                                'text': chunk,
                                'metadata': {'page': page_num + 1}
                            })
                    return chunks
            except Exception as e2:
                # Preserve the original traceback with exception chaining
                raise Exception(
                    f"Failed to process PDF with both methods. Error: {str(e2)}"
                ) from e2
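

# Usage sketch: shows how the processor above could be invoked and what the
# returned chunk dictionaries look like. The path "sample.pdf" is a hypothetical
# placeholder, not part of the original module.
if __name__ == "__main__":
    processor = PDFProcessor()
    chunks = processor.process_pdf("sample.pdf")  # hypothetical input PDF
    print(f"Extracted {len(chunks)} chunks")
    if chunks:
        first = chunks[0]
        print(f"First chunk (page {first['metadata']['page']}): {first['text'][:200]}")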