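"""Document Summarizer -- Gradio UI mounted on a FastAPI app.

Extracts text from PDF, DOCX, PPTX, and XLSX files and summarizes it with
facebook/bart-large-cnn. Long documents are split into sentence-based chunks
so each piece fits within the model's input limit.
"""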
"""import gradio as gr
from transformers import pipeline
import fitz # PyMuPDF
import docx
import pptx
import openpyxl
import os
from fastapi import FastAPI
from fastapi.responses import RedirectResponse
# Load your custom summarization model
pipe = pipeline("summarization", model="facebook/bart-large-cnn", tokenizer="facebook/bart-large-cnn")
# Document text extraction function
def extract_text(file):
ext = file.name.split(".")[-1].lower()
path = file.name
if ext == "pdf":
try:
with fitz.open(path) as doc:
return "\n".join([page.get_text("text") for page in doc])
except Exception as e:
return f"Error reading PDF: {e}"
elif ext == "docx":
try:
doc = docx.Document(path)
return "\n".join([p.text for p in doc.paragraphs])
except Exception as e:
return f"Error reading DOCX: {e}"
elif ext == "pptx":
try:
prs = pptx.Presentation(path)
text = ""
for slide in prs.slides:
for shape in slide.shapes:
if hasattr(shape, "text"):
text += shape.text + "\n"
return text
except Exception as e:
return f"Error reading PPTX: {e}"
elif ext == "xlsx":
try:
wb = openpyxl.load_workbook(path)
text = ""
for sheet in wb.sheetnames:
for row in wb[sheet].iter_rows(values_only=True):
text += " ".join([str(cell) for cell in row if cell]) + "\n"
return text
except Exception as e:
return f"Error reading XLSX: {e}"
else:
return "Unsupported file format"
# Summarization logic
def summarize_document(file):
text = extract_text(file)
if "Error" in text or "Unsupported" in text:
return text
word_count = len(text.split())
max_summary_len = max(20, int(word_count * 0.2))
try:
summary = pipe(text, max_length=max_summary_len, min_length=int(max_summary_len * 0.6), do_sample=False)
# Print the summary to debug its structure
print(summary)
return summary[0]['summary_text'] # Access the correct key for the output
except Exception as e:
return f"Error during summarization: {e}"
# Gradio Interface
demo = gr.Interface(
fn=summarize_document,
inputs=gr.File(label="Upload a document (PDF, DOCX, PPTX, XLSX)", file_types=[".pdf", ".docx", ".pptx", ".xlsx"]),
outputs=gr.Textbox(label="20% Summary"),
title="πŸ“„ Document Summarizer (20% Length)",
description="Upload a document and get a concise summary generated by your custom Hugging Face model."
)
# FastAPI setup
app = FastAPI()
# Mount Gradio at "/"
app = gr.mount_gradio_app(app, demo, path="/")
# Optional root redirect
@app.get("/")
def redirect_to_interface():
return RedirectResponse(url="/")"""
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
import fitz # PyMuPDF
import docx
import pptx
import openpyxl
import re
import nltk
from nltk.tokenize import sent_tokenize
import torch
from fastapi import FastAPI
from fastapi.responses import RedirectResponse
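
# Expected dependencies (illustrative list inferred from the imports, not part
# of the original file): gradio, transformers, torch, fastapi, nltk, openpyxl,
# PyMuPDF (imported as fitz), python-docx (imported as docx),
# python-pptx (imported as pptx).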
# Download required NLTK sentence-tokenizer data ("punkt_tab" is the split
# package used by newer NLTK releases; downloading both is harmless)
nltk.download('punkt', quiet=True)
nltk.download('punkt_tab', quiet=True)
# Initialize components
app = FastAPI()

# Load summarization model (CPU optimized)
MODEL_NAME = "facebook/bart-large-cnn"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
summarizer = pipeline(
    "summarization",
    model=model,
    tokenizer=tokenizer,
    device=-1,  # Force CPU usage
    torch_dtype=torch.float32
)
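
# Note: bart-large-cnn accepts at most 1024 input tokens, so long documents
# are split into chunks (see chunk_text below) before summarization;
# truncation=True in the pipeline call guards against any overruns.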
def clean_text(text: str) -> str:
    """Clean and normalize document text"""
    text = re.sub(r'\s+', ' ', text)  # Normalize whitespace
    text = re.sub(r'•\s*|\d\.\s+', '', text)  # Remove bullets and numbering
    text = re.sub(r'\[.*?\]|\(.*?\)', '', text)  # Remove brackets/parentheses
    text = re.sub(r'\bPage\s*\d+\b', '', text, flags=re.IGNORECASE)  # Remove page numbers
    return text.strip()
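
# Example (illustrative): clean_text("•  Item one [note 1]  Page 3")
# returns "Item one" -- the bullet, the bracketed note, and the page marker
# are stripped, and whitespace is collapsed.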
def extract_text(file_path: str, file_extension: str) -> tuple[str, str]:
    """Extract text from various document formats, returning (text, error)"""
    try:
        if file_extension == "pdf":
            with fitz.open(file_path) as doc:
                return clean_text("\n".join(page.get_text("text") for page in doc)), ""
        elif file_extension == "docx":
            doc = docx.Document(file_path)
            return clean_text("\n".join(p.text for p in doc.paragraphs)), ""
        elif file_extension == "pptx":
            prs = pptx.Presentation(file_path)
            text = []
            for slide in prs.slides:
                for shape in slide.shapes:
                    if hasattr(shape, "text"):
                        text.append(shape.text)
            return clean_text("\n".join(text)), ""
        elif file_extension == "xlsx":
            wb = openpyxl.load_workbook(file_path, read_only=True)
            text = []
            for sheet in wb.sheetnames:
                for row in wb[sheet].iter_rows(values_only=True):
                    text.append(" ".join(str(cell) for cell in row if cell))
            return clean_text("\n".join(text)), ""
        return "", "Unsupported file format"
    except Exception as e:
        return "", f"Error reading {file_extension.upper()} file: {str(e)}"
def chunk_text(text: str, max_tokens: int = 768) -> list[str]:
    """Split text into manageable chunks for summarization.

    Word counts are used as a rough proxy for model tokens; the pipeline's
    truncation=True catches any chunk that still tokenizes too long.
    """
    try:
        sentences = sent_tokenize(text)
    except Exception:
        # Fallback if NLTK sentence tokenization fails: fixed 20-word windows
        words = text.split()
        sentences = [' '.join(words[i:i + 20]) for i in range(0, len(words), 20)]
    chunks = []
    current_chunk = ""
    for sentence in sentences:
        if len(current_chunk.split()) + len(sentence.split()) <= max_tokens:
            current_chunk += " " + sentence
        else:
            chunks.append(current_chunk.strip())
            current_chunk = sentence
    if current_chunk:
        chunks.append(current_chunk.strip())
    return chunks
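
# Example (illustrative): chunk_text("A b c. D e. F g h i.", max_tokens=5)
# returns ["A b c. D e.", "F g h i."] -- sentences are packed greedily until
# the word budget would be exceeded, then a new chunk starts.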
def generate_summary(text: str, length: str = "medium") -> str:
    """Generate summary with appropriate length parameters"""
    length_params = {
        "short": {"max_length": 80, "min_length": 30},
        "medium": {"max_length": 150, "min_length": 60},
        "long": {"max_length": 200, "min_length": 80}
    }
    chunks = chunk_text(text)
    summaries = []
    for chunk in chunks:
        try:
            summary = summarizer(
                chunk,
                max_length=length_params[length]["max_length"],
                min_length=length_params[length]["min_length"],
                do_sample=False,
                truncation=True,
                no_repeat_ngram_size=2,
                num_beams=2,
                early_stopping=True
            )
            summaries.append(summary[0]['summary_text'])
        except Exception as e:
            summaries.append(f"[Chunk error: {str(e)}]")
    # Combine chunk summaries and capitalize only the first letter of each
    # sentence (str.capitalize would lowercase the rest, mangling acronyms)
    final_summary = " ".join(summaries)
    sentences = [s.strip() for s in final_summary.split(". ") if s.strip()]
    final_summary = ". ".join(s[0].upper() + s[1:] for s in sentences)
    return final_summary if len(final_summary) > 25 else "Summary too short - document may be too brief"
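
# Design note: chunk summaries are simply concatenated (a "map" step with no
# "reduce"). For very long documents, a second pass over the joined text
# could tighten the result at the cost of another model call -- an untested
# sketch, not in the original:
#   if len(final_summary.split()) > 768:
#       final_summary = summarizer(final_summary, truncation=True)[0]['summary_text']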
def summarize_document(file, summary_length: str):
    """Main processing function for Gradio interface"""
    if file is None:
        return "Please upload a document first", "Ready"
    # With gr.File(type="filepath") Gradio passes a plain string path;
    # fall back to .name for file-object inputs from older Gradio versions
    file_path = file if isinstance(file, str) else file.name
    file_extension = file_path.split(".")[-1].lower()
    text, error = extract_text(file_path, file_extension)
    if error:
        return error, "Error"
    if not text or len(text.split()) < 30:
        return "Document is too short or contains too little text to summarize", "Ready"
    try:
        summary = generate_summary(text, summary_length)
        return summary, "Summary complete"
    except Exception as e:
        return f"Summarization error: {str(e)}", "Error"
# Gradio Interface
with gr.Blocks(title="Document Summarizer", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 📄 Document Summarizer")
    gr.Markdown("Upload a document to generate a concise summary")
    with gr.Row():
        with gr.Column():
            file_input = gr.File(
                label="Upload Document",
                file_types=[".pdf", ".docx", ".pptx", ".xlsx"],
                type="filepath"
            )
            length_radio = gr.Radio(
                ["short", "medium", "long"],
                value="medium",
                label="Summary Length"
            )
            submit_btn = gr.Button("Generate Summary", variant="primary")
        with gr.Column():
            output = gr.Textbox(label="Summary", lines=10)
            status = gr.Textbox(label="Status", interactive=False)
    submit_btn.click(
        fn=summarize_document,
        inputs=[file_input, length_radio],
        outputs=[output, status],
        api_name="summarize"
    )
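
# Because the click handler sets api_name="summarize", the endpoint can also
# be called programmatically (illustrative sketch, assuming gradio_client is
# installed and the app runs on localhost:7860):
#   from gradio_client import Client, handle_file
#   client = Client("http://localhost:7860/")
#   summary, status = client.predict(
#       handle_file("report.pdf"), "medium", api_name="/summarize"
#   )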
# Mount the Gradio app on FastAPI at the root path. Gradio serves "/" itself,
# so no separate redirect route is needed (a route registered after this mount
# would never be reached, and redirecting "/" to "/" would loop).
app = gr.mount_gradio_app(app, demo, path="/")
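
# Local entry point (a minimal sketch, assuming uvicorn is installed; on
# Hugging Face Spaces the server is launched for you, so this is optional)
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)  # 7860 is the usual Gradio/Spaces port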