"""from fastapi import FastAPI, UploadFile, File, Form, Request from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import HTMLResponse, JSONResponse, FileResponse from fastapi.staticfiles import StaticFiles from fastapi.templating import Jinja2Templates import os import tempfile from typing import Optional # Initialize FastAPI app = FastAPI() # CORS Policy: allow everything (because Hugging Face Spaces needs it open) app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) # Static files and templates app.mount("/static", StaticFiles(directory="static"), name="static") app.mount("/resources", StaticFiles(directory="resources"), name="resources") templates = Jinja2Templates(directory="templates") # --- Serve Frontend --- @app.get("/", response_class=HTMLResponse) async def serve_home(request: Request): return templates.TemplateResponse("HomeS.html", {"request": request}) # --- API Endpoints that frontend needs --- @app.post("/summarize/") async def summarize_document_endpoint(file: UploadFile = File(...), length: str = Form("medium")): try: from app import summarize_api return await summarize_api(file, length) except Exception as e: return JSONResponse({"error": f"Summarization failed: {str(e)}"}, status_code=500) @app.post("/imagecaption/") async def caption_image_endpoint(file: UploadFile = File(...)): try: from appImage import caption_from_frontend return await caption_from_frontend(file) except Exception as e: return JSONResponse({"error": f"Image captioning failed: {str(e)}"}, status_code=500) # --- Serve generated audio/pdf files --- @app.get("/files/{filename}") async def serve_file(filename: str): path = os.path.join(tempfile.gettempdir(), filename) if os.path.exists(path): return FileResponse(path) return JSONResponse({"error": "File not found"}, status_code=404) # (Optional) Unified prediction endpoint — Only if you want @app.post("/predict") async def predict( file: UploadFile = File(...), option: str = Form(...), # "Summarize" or "Captioning" length: Optional[str] = Form(None) # Only for Summarize ): try: if option == "Summarize": return await summarize_document_endpoint(file, length or "medium") elif option == "Captioning": return await caption_image_endpoint(file) else: return JSONResponse({"error": "Invalid option"}, status_code=400) except Exception as e: return JSONResponse({"error": f"Prediction failed: {str(e)}"}, status_code=500) """ from fastapi import FastAPI, UploadFile, File, Form, Request, HTTPException from fastapi.responses import HTMLResponse, JSONResponse, FileResponse from fastapi.staticfiles import StaticFiles from fastapi.templating import Jinja2Templates from fastapi.middleware.cors import CORSMiddleware from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM, AutoProcessor, AutoModelForCausalLM from PIL import Image import torch import fitz # PyMuPDF import docx import pptx import openpyxl import re import nltk from nltk.tokenize import sent_tokenize from gtts import gTTS from fpdf import FPDF import tempfile import os import shutil import datetime import hashlib import easyocr from typing import Optional # Initialize app app = FastAPI() # CORS Configuration app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) # Static assets app.mount("/static", StaticFiles(directory="static"), name="static") app.mount("/resources", StaticFiles(directory="resources"), name="resources") # Templates 
templates = Jinja2Templates(directory="templates")

# Initialize models
nltk.download('punkt', quiet=True)

# Document processing models
try:
    tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
    model = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-large-cnn")
    model.eval()
    summarizer = pipeline("summarization", model=model, tokenizer=tokenizer, device=-1)
    reader = easyocr.Reader(['en'], gpu=torch.cuda.is_available())
except Exception as e:
    print(f"Error loading summarization models: {e}")
    summarizer = None

# Image captioning models
try:
    processor = AutoProcessor.from_pretrained("microsoft/git-large-coco")
    git_model = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco")
    git_model.eval()
    USE_GIT = True
except Exception as e:
    print(f"Error loading GIT model, falling back to ViT: {e}")
    captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
    USE_GIT = False


# Helper functions
def clean_text(text: str) -> str:
    """Normalize whitespace and strip bullets, bracketed text, and page markers."""
    text = re.sub(r'\s+', ' ', text)
    text = re.sub(r'\u2022\s*|\d\.\s+', '', text)
    text = re.sub(r'\[.*?\]|\(.*?\)', '', text)
    text = re.sub(r'\bPage\s*\d+\b', '', text, flags=re.IGNORECASE)
    return text.strip()


def extract_text(file_path: str, file_extension: str):
    """Extract plain text from PDF/DOCX/PPTX/XLSX; returns (text, error_message)."""
    try:
        if file_extension == "pdf":
            with fitz.open(file_path) as doc:
                text = "\n".join(page.get_text("text") for page in doc)
                if len(text.strip()) < 50:
                    # Likely a scanned PDF: OCR the first page as a fallback
                    images = [page.get_pixmap() for page in doc]
                    temp_img = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
                    images[0].save(temp_img.name)
                    ocr_result = reader.readtext(temp_img.name, detail=0)
                    os.unlink(temp_img.name)
                    text = "\n".join(ocr_result) if ocr_result else text
            return clean_text(text), ""
        elif file_extension == "docx":
            doc = docx.Document(file_path)
            return clean_text("\n".join(p.text for p in doc.paragraphs)), ""
        elif file_extension == "pptx":
            prs = pptx.Presentation(file_path)
            text = [shape.text for slide in prs.slides for shape in slide.shapes if hasattr(shape, "text")]
            return clean_text("\n".join(text)), ""
        elif file_extension == "xlsx":
            wb = openpyxl.load_workbook(file_path, read_only=True)
            text = [" ".join(str(cell) for cell in row if cell)
                    for sheet in wb.sheetnames
                    for row in wb[sheet].iter_rows(values_only=True)]
            return clean_text("\n".join(text)), ""
        return "", "Unsupported file format"
    except Exception as e:
        return "", f"Error reading {file_extension.upper()} file: {str(e)}"


def chunk_text(text: str, max_tokens: int = 950):
    """Split text into chunks that stay under the BART token limit."""
    try:
        sentences = sent_tokenize(text)
    except Exception:
        # Fallback if the punkt tokenizer is unavailable: naive 20-word chunks
        words = text.split()
        sentences = [' '.join(words[i:i + 20]) for i in range(0, len(words), 20)]
    chunks = []
    current_chunk = ""
    for sentence in sentences:
        token_length = len(tokenizer.encode(current_chunk + " " + sentence))
        if token_length <= max_tokens:
            current_chunk += " " + sentence
        else:
            chunks.append(current_chunk.strip())
            current_chunk = sentence
    if current_chunk:
        chunks.append(current_chunk.strip())
    return chunks


def generate_summary(text: str, length: str = "medium") -> str:
    """Summarize the text chunk by chunk and stitch the results together."""
    cache_key = hashlib.md5((text + length).encode()).hexdigest()  # reserved for caching; currently unused
    length_params = {
        "short": {"max_length": 80, "min_length": 30},
        "medium": {"max_length": 200, "min_length": 80},
        "long": {"max_length": 300, "min_length": 210}
    }
    chunks = chunk_text(text)
    try:
        summaries = summarizer(
            chunks,
            max_length=length_params[length]["max_length"],
            min_length=length_params[length]["min_length"],
            do_sample=False,
            truncation=True
        )
        summary_texts = [s['summary_text'] for s in summaries]
    except Exception as e:
        summary_texts = [f"[Error: {str(e)}]"]
    final_summary = " ".join(summary_texts)
    final_summary = ". ".join(s.strip().capitalize() for s in final_summary.split(". ") if s.strip())
    return final_summary if len(final_summary) > 25 else "Summary too short"


def text_to_speech(text: str):
    """Convert text to an MP3 with gTTS; returns the temp file path, or "" on failure."""
    try:
        tts = gTTS(text)
        temp_audio = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
        tts.save(temp_audio.name)
        return temp_audio.name
    except Exception as e:
        print(f"Error in text-to-speech: {e}")
        return ""


def create_pdf(summary: str, original_filename: str):
    """Write the summary to a simple PDF; returns the temp file path, or "" on failure."""
    try:
        pdf = FPDF()
        pdf.add_page()
        pdf.set_font("Arial", 'B', 16)
        pdf.cell(200, 10, txt="Document Summary", ln=1, align='C')
        pdf.set_font("Arial", size=12)
        pdf.cell(200, 10, txt=f"Original file: {original_filename}", ln=1)
        pdf.cell(200, 10, txt=f"Generated on: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}", ln=1)
        pdf.ln(10)
        pdf.multi_cell(0, 10, txt=summary)
        temp_pdf = tempfile.NamedTemporaryFile(delete=False, suffix=".pdf")
        pdf.output(temp_pdf.name)
        return temp_pdf.name
    except Exception as e:
        print(f"Error creating PDF: {e}")
        return ""


def generate_caption(image_path: str) -> str:
    """Caption an image with GIT, or with the ViT-GPT2 fallback pipeline."""
    try:
        if USE_GIT:
            image = Image.open(image_path).convert("RGB")
            inputs = processor(images=image, return_tensors="pt")
            outputs = git_model.generate(**inputs, max_length=50)
            caption = processor.batch_decode(outputs, skip_special_tokens=True)[0]
        else:
            result = captioner(image_path)
            caption = result[0]['generated_text']
        return caption
    except Exception as e:
        raise Exception(f"Caption generation failed: {str(e)}")


# API Endpoints
@app.post("/summarize/")
async def summarize_document(file: UploadFile = File(...), length: str = Form("medium")):
    valid_types = [
        'application/pdf',
        'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
        'application/vnd.openxmlformats-officedocument.presentationml.presentation',
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    ]
    if file.content_type not in valid_types:
        raise HTTPException(
            status_code=400,
            detail="Please upload a valid document (PDF, DOCX, PPTX, or XLSX)"
        )
    try:
        # Save upload to a temp file
        with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(file.filename)[1]) as temp:
            shutil.copyfileobj(file.file, temp)
            temp_path = temp.name

        # Extract and validate text
        text, error = extract_text(temp_path, os.path.splitext(file.filename)[1][1:].lower())
        if error:
            raise HTTPException(status_code=400, detail=error)
        if not text or len(text.split()) < 30:
            raise HTTPException(status_code=400, detail="Document too short to summarize")

        summary = generate_summary(text, length)
        audio_path = text_to_speech(summary)
        pdf_path = create_pdf(summary, file.filename)

        return {
            "summary": summary,
            "audio_url": f"/files/{os.path.basename(audio_path)}" if audio_path else None,
            "pdf_url": f"/files/{os.path.basename(pdf_path)}" if pdf_path else None
        }
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Summarization failed: {str(e)}"
        )
    finally:
        if 'temp_path' in locals() and os.path.exists(temp_path):
            os.unlink(temp_path)


@app.post("/imagecaption/")
async def caption_image(file: UploadFile = File(...)):
    valid_types = ['image/jpeg', 'image/png', 'image/gif', 'image/webp']
    if file.content_type not in valid_types:
        raise HTTPException(
            status_code=400,
            detail="Please upload a valid image (JPEG, PNG, GIF, or WEBP)"
        )
    try:
        # Save upload to a temp file
        with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(file.filename)[1]) as temp:
            shutil.copyfileobj(file.file, temp)
            temp_path = temp.name

        # Generate caption
        caption = generate_caption(temp_path)

        # Generate audio
        audio_path = text_to_speech(caption)

        return {
            "answer": caption,
            "audio": f"/files/{os.path.basename(audio_path)}" if audio_path else None
        }
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=str(e)
        )
    finally:
        if 'temp_path' in locals() and os.path.exists(temp_path):
            os.unlink(temp_path)


@app.get("/files/{filename}")
async def serve_file(filename: str):
    # Restrict to the bare filename so requests cannot traverse outside the temp dir
    safe_name = os.path.basename(filename)
    file_path = os.path.join(tempfile.gettempdir(), safe_name)
    if os.path.exists(file_path):
        return FileResponse(file_path)
    raise HTTPException(status_code=404, detail="File not found")


@app.get("/", response_class=HTMLResponse)
async def serve_home(request: Request):
    return templates.TemplateResponse("HomeS.html", {"request": request})
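
# Minimal local launcher, assuming uvicorn is installed (standard for FastAPI apps) and
# that port 7860 is what the hosting environment expects (the usual Hugging Face Spaces
# default). If the Space entrypoint already starts the server, this block is simply unused.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)

# Example request against the summarization endpoint (illustrative values: a local server
# on port 7860 and a file named report.pdf):
#
#   curl -X POST http://localhost:7860/summarize/ \
#        -F "file=@report.pdf;type=application/pdf" \
#        -F "length=medium"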