ikraamkb committed
Commit 4c56bf5 · verified · 1 Parent(s): e72bba1

Update Summarization/app.py

Files changed (1)
  1. Summarization/app.py +0 -142
Summarization/app.py CHANGED
@@ -1,145 +1,3 @@
-# app.py
-"""from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
-import fitz, docx, pptx, openpyxl, re, nltk, tempfile, os, easyocr, datetime, hashlib
-from nltk.tokenize import sent_tokenize
-from fpdf import FPDF
-from gtts import gTTS
-
-nltk.download('punkt', quiet=True)
-
-# Load models
-MODEL_NAME = "facebook/bart-large-cnn"
-tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
-model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
-model.eval()
-summarizer = pipeline("summarization", model=model, tokenizer=tokenizer, device=-1, batch_size=4)
-reader = easyocr.Reader(['en'], gpu=False)
-
-summary_cache = {}
-
-def clean_text(text: str) -> str:
-    text = re.sub(r'\s+', ' ', text)
-    text = re.sub(r'\u2022\s*|\d\.\s+', '', text)
-    text = re.sub(r'\[.*?\]|\(.*?\)', '', text)
-    text = re.sub(r'\bPage\s*\d+\b', '', text, flags=re.IGNORECASE)
-    return text.strip()
-
-def extract_text(file_path: str, ext: str):
-    try:
-        if ext == "pdf":
-            with fitz.open(file_path) as doc:
-                text = "\n".join(page.get_text("text") for page in doc)
-                if len(text.strip()) < 50:
-                    images = [page.get_pixmap() for page in doc]
-                    temp_img = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
-                    images[0].save(temp_img.name)
-                    text = "\n".join(reader.readtext(temp_img.name, detail=0))
-                    os.unlink(temp_img.name)
-        elif ext == "docx":
-            doc = docx.Document(file_path)
-            text = "\n".join(p.text for p in doc.paragraphs)
-        elif ext == "pptx":
-            prs = pptx.Presentation(file_path)
-            text = "\n".join(shape.text for slide in prs.slides for shape in slide.shapes if hasattr(shape, "text"))
-        elif ext == "xlsx":
-            wb = openpyxl.load_workbook(file_path, read_only=True)
-            text = "\n".join([" ".join(str(cell) for cell in row if cell) for sheet in wb.sheetnames for row in wb[sheet].iter_rows(values_only=True)])
-        else:
-            text = ""
-    except Exception as e:
-        return "", f"Error extracting text: {str(e)}"
-
-    return clean_text(text), ""
-
-def chunk_text(text: str, max_tokens: int = 950):
-    sentences = sent_tokenize(text)
-    chunks, current_chunk = [], ""
-    for sentence in sentences:
-        if len(tokenizer.encode(current_chunk + " " + sentence)) <= max_tokens:
-            current_chunk += " " + sentence
-        else:
-            chunks.append(current_chunk.strip())
-            current_chunk = sentence
-    if current_chunk:
-        chunks.append(current_chunk.strip())
-    return chunks
-
-def generate_summary(text: str, length: str = "medium"):
-    cache_key = hashlib.md5((text + length).encode()).hexdigest()
-    if cache_key in summary_cache:
-        return summary_cache[cache_key]
-
-    length_params = {
-        "short": {"max_length": 80, "min_length": 30},
-        "medium": {"max_length": 200, "min_length": 80},
-        "long": {"max_length": 300, "min_length": 210}
-    }
-
-    chunks = chunk_text(text)
-    summaries = summarizer(
-        chunks,
-        max_length=length_params[length]["max_length"],
-        min_length=length_params[length]["min_length"],
-        do_sample=False,
-        truncation=True,
-        no_repeat_ngram_size=2,
-        num_beams=2,
-        early_stopping=True
-    )
-    final_summary = " ".join(s['summary_text'] for s in summaries)
-    final_summary = ". ".join(s.strip().capitalize() for s in final_summary.split(". ") if s.strip())
-    final_summary = final_summary if len(final_summary) > 25 else "Summary too short."
-
-    summary_cache[cache_key] = final_summary
-    return final_summary
-
-def text_to_speech(text: str):
-    try:
-        tts = gTTS(text)
-        temp_audio = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
-        tts.save(temp_audio.name)
-        return temp_audio.name
-    except:
-        return ""
-
-def create_pdf(summary: str, filename: str):
-    try:
-        pdf = FPDF()
-        pdf.add_page()
-        pdf.set_font("Arial", size=12)
-        pdf.multi_cell(0, 10, summary)
-        temp_pdf = tempfile.NamedTemporaryFile(delete=False, suffix=".pdf")
-        pdf.output(temp_pdf.name)
-        return temp_pdf.name
-    except:
-        return ""
-
-async def summarize_document(file, length="medium"):
-    contents = await file.read()
-    with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
-        tmp_file.write(contents)
-        tmp_path = tmp_file.name
-
-    ext = file.filename.split('.')[-1].lower()
-    text, error = extract_text(tmp_path, ext)
-
-    if error:
-        raise Exception(error)
-
-    if not text or len(text.split()) < 30:
-        raise Exception("Document too short to summarize.")
-
-    summary = generate_summary(text, length)
-    audio_path = text_to_speech(summary)
-    pdf_path = create_pdf(summary, file.filename)
-
-    result = {"summary": summary}
-    if audio_path:
-        result["audioUrl"] = f"/files/{os.path.basename(audio_path)}"
-    if pdf_path:
-        result["pdfUrl"] = f"/files/{os.path.basename(pdf_path)}"
-    return result"""
-# app.py
 
 from fastapi import UploadFile, File
 from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
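Note: the commit deletes the old implementation, which had been kept as one large commented-out docstring, and the file now opens with the FastAPI and transformers imports visible in the trailing context lines. For orientation only, here is a minimal sketch of how those imports are typically wired into an upload-and-summarize endpoint. The app object, the /summarize route, the plain-text decoding, and the inline pipeline call below are illustrative assumptions, not code from this commit:

# Hypothetical sketch, not part of this commit: a minimal FastAPI endpoint
# built from the imports this diff leaves in place. Route path, length
# parameters, and plain-text decoding are assumptions.
from fastapi import FastAPI, UploadFile, File, HTTPException
from transformers import pipeline

app = FastAPI()
# Same summarization model the removed code loaded explicitly (CPU inference).
summarizer = pipeline("summarization", model="facebook/bart-large-cnn", device=-1)

@app.post("/summarize")
async def summarize(file: UploadFile = File(...)):
    # Read the upload and treat it as plain text; the removed code instead
    # extracted text from pdf/docx/pptx/xlsx before summarizing.
    raw = (await file.read()).decode("utf-8", errors="ignore")
    if len(raw.split()) < 30:
        raise HTTPException(status_code=400, detail="Document too short to summarize.")
    out = summarizer(raw, max_length=200, min_length=80, do_sample=False, truncation=True)
    return {"summary": out[0]["summary_text"]}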