Update app.py
app.py (CHANGED)
@@ -184,7 +184,6 @@ def validate_file(file_obj) -> None:
 def preprocess_text(text: str) -> str:
     """Normalize text for more reliable parsing"""
     text = re.sub(r'\s+', ' ', text)  # Normalize whitespace
-    text = text.replace('|', ' ')  # Handle common OCR errors
     text = text.upper()  # Standardize case for certain fields
     return text
 
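One practical effect of dropping the pipe replacement, shown as a small sketch rather than taken from the commit message: later in extract_text_from_file the extracted table cells are joined with " | ", so stripping pipes during preprocessing would erase the delimiter the parser splits on. The sample row below is invented.

row = "A-ENGLISH | 2023-2024 | 09 | 1001310"
print(row.replace('|', ' ').split('|'))     # ['A-ENGLISH   2023-2024   09   1001310'] - delimiter gone
print([p.strip() for p in row.split('|')])  # ['A-ENGLISH', '2023-2024', '09', '1001310']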
@@ -198,29 +197,32 @@ def extract_text_from_file(file_path: str, file_ext: str) -> str:
                 import pdfplumber
                 with pdfplumber.open(file_path) as pdf:
                     for page in pdf.pages:
-                        # Try tables first
-                        tables = page.extract_tables(
+                        # Try to extract tables first
+                        tables = page.extract_tables({
+                            "vertical_strategy": "text",
+                            "horizontal_strategy": "text",
+                            "intersection_y_tolerance": 10
+                        })
+
                         if tables:
                             for table in tables:
-                                " | ".join(str(cell) for cell in row if cell
-
-                        # Fall back to text extraction
+                                for row in table:
+                                    text += " | ".join(str(cell).strip() for cell in row if cell) + "\n"
+
+                        # Fall back to text extraction if tables are empty
                         page_text = page.extract_text()
                         if page_text:
                             text += page_text + "\n"
+
                 if not text.strip():
                     raise ValueError("PDFPlumber returned empty text")
+
             except Exception as e:
                 logging.warning(f"PDFPlumber failed: {str(e)}. Trying PyMuPDF...")
                 doc = fitz.open(file_path)
                 for page in doc:
                     text += page.get_text("text") + '\n'
-                logging.warning("PyMuPDF returned empty text, trying OCR fallback...")
-                text = extract_text_from_pdf_with_ocr(file_path)
+
     elif file_ext in ['.png', '.jpg', '.jpeg']:
         text = extract_text_with_ocr(file_path)
 
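For context, a minimal standalone sketch of the table-first extraction pattern this hunk introduces. It assumes only that pdfplumber is installed; the function name and the sample usage are illustrative, not part of app.py.

import pdfplumber

def extract_pdf_text_tables_first(path: str) -> str:
    text = ""
    with pdfplumber.open(path) as pdf:
        for page in pdf.pages:
            # Text-based strategies let pdfplumber infer cell boundaries from word
            # positions, which suits transcripts that have no ruled table lines.
            tables = page.extract_tables({
                "vertical_strategy": "text",
                "horizontal_strategy": "text",
                "intersection_y_tolerance": 10,
            })
            for table in tables or []:
                for row in table:
                    text += " | ".join(str(cell).strip() for cell in row if cell) + "\n"
            # Plain text extraction still runs so non-tabular lines are kept as well.
            page_text = page.extract_text()
            if page_text:
                text += page_text + "\n"
    return text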
@@ -233,7 +235,7 @@ def extract_text_from_file(file_path: str, file_ext: str) -> str:
 
     except Exception as e:
         logging.error(f"Text extraction error: {str(e)}")
-        raise
+        raise ValueError(f"Failed to extract text: {str(e)}")
 
 def extract_text_from_pdf_with_ocr(file_path: str) -> str:
     try:
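A small related sketch, not part of the commit: if the goal is a uniform error type while keeping the original traceback, the new raise could also chain the source exception. The function name and the stand-in error are invented for illustration.

def reraise_with_context() -> None:
    try:
        raise RuntimeError("pdf backend failed")  # stand-in for a real extraction failure
    except Exception as e:
        # "raise ... from e" keeps the original exception attached to the ValueError,
        # so the logged traceback still shows where extraction actually broke.
        raise ValueError(f"Failed to extract text: {e}") from e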
@@ -271,18 +273,35 @@ def extract_text_with_ocr(file_path: str) -> str:
         raise ValueError(f"OCR processing failed: {str(e)}")
 
 def clean_extracted_text(text: str) -> str:
+    """Special cleaning for Miami-Dade transcripts"""
+    # Normalize whitespace
     text = re.sub(r'\s+', ' ', text).strip()
+
+    # Fix common OCR errors
     replacements = {
+        'GradeLv1': 'GradeLvl',
+        'CrsNu m': 'CrsNum',
+        'YOG': 'Year of Graduation',
+        'Comm Serv': 'Community Service',
+        r'\bA\s*-\s*': 'A-',  # Fix requirement codes
+        r'\bB\s*-\s*': 'B-',
+        r'\bC\s*-\s*': 'C-',
+        r'\bD\s*-\s*': 'D-',
+        r'\bE\s*-\s*': 'E-',
+        r'\bF\s*-\s*': 'F-',
+        r'\bG\s*-\s*': 'G-',
+        r'\bZ\s*-\s*': 'Z-'
     }
+
+    for pattern, replacement in replacements.items():
+        text = re.sub(pattern, replacement, text)
+
+    # Fix course codes with spaces
+    text = re.sub(r'(\b[A-Z]{2,4})\s(\d{3}[A-Z]?\b)', r'\1\2', text)
+
+    # Fix common OCR errors in credits
+    text = re.sub(r'in\s*Progress', 'inProgress', text, flags=re.IGNORECASE)
+
     return text
 
 def remove_sensitive_info(text: str) -> str:
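To make the replacement table concrete, here is a hedged, self-contained example of how the loop applies it. Only a few entries are repeated from the hunk, and the sample OCR line is invented.

import re

replacements = {
    'GradeLv1': 'GradeLvl',
    'CrsNu m': 'CrsNum',
    r'\bA\s*-\s*': 'A-',
}

sample = "GradeLv1 09  CrsNu m 1001310  A -  ENG 1"
for pattern, replacement in replacements.items():
    # Every key goes through re.sub, so literal strings and regex patterns can be
    # mixed as long as the literal keys contain no regex metacharacters.
    sample = re.sub(pattern, replacement, sample)
print(sample)  # GradeLvl 09  CrsNum 1001310  A-ENG 1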
@@ -345,7 +364,7 @@ class TranscriptParser:
             raise ValueError(f"Couldn't parse transcript content. Error: {str(e)}")
 
     def _parse_detailed_transcript(self, text: str) -> Optional[Dict]:
-        """Parse detailed transcript format with improved patterns"""
+        """Parse detailed transcript format with improved patterns for Miami-Dade format"""
         try:
             parsed_data = {
                 'student_info': {},
@@ -354,102 +373,113 @@ class TranscriptParser:
                 'assessments': {}
             }
 
             # Extract student info
             student_info_match = re.search(r"(\d{7}) - (.*?)\n", text)
             if student_info_match:
                 parsed_data['student_info']['id'] = student_info_match.group(1)
                 parsed_data['student_info']['name'] = student_info_match.group(2).strip()
 
+            # Extract grade and year info
+            grade_match = re.search(r"Current Grade:\s*(\d+)", text)
+            if grade_match:
+                parsed_data['student_info']['grade'] = grade_match.group(1)
+
+            yog_match = re.search(r"YOG\s*(\d{4})", text)
             if yog_match:
                 parsed_data['student_info']['year_of_graduation'] = yog_match.group(1)
 
+            # Extract GPA information
+            gpa_matches = re.findall(r"(?:Un-weighted|Weighted)\s*GPA\s*([\d.]+)", text)
             if len(gpa_matches) >= 1:
                 parsed_data['student_info']['unweighted_gpa'] = float(gpa_matches[0])
             if len(gpa_matches) >= 2:
                 parsed_data['student_info']['weighted_gpa'] = float(gpa_matches[1])
 
+            # Extract community service info
+            service_hours_match = re.search(r"Comm\s*Serv\s*Hours\s*(\d+)", text, re.IGNORECASE)
             if service_hours_match:
                 parsed_data['student_info']['community_service_hours'] = int(service_hours_match.group(1))
+
+            service_date_match = re.search(r"Comm\s*Serv\s*Date\s*(\d{2}/\d{2}/\d{4})", text, re.IGNORECASE)
             if service_date_match:
                 parsed_data['student_info']['community_service_date'] = service_date_match.group(1)
 
+            # Extract credits info
+            credits_match = re.search(r"Total\s*Credits\s*Earned\s*([\d.]+)", text, re.IGNORECASE)
             if credits_match:
                 parsed_data['student_info']['total_credits'] = float(credits_match.group(1))
 
+            # Extract virtual grade
+            virtual_grade_match = re.search(r"Virtual\s*Grade\s*([A-Z])", text, re.IGNORECASE)
             if virtual_grade_match:
                 parsed_data['student_info']['virtual_grade'] = virtual_grade_match.group(1)
 
-            # Extract requirements
+            # Extract requirements - specific to this format
+            req_section = re.search(r"Code\s*Description\s*Required\s*Waived\s*Completed\s*Status(.*?)(?:\n\s*\n|$)", text, re.DOTALL)
+            if req_section:
+                req_lines = [line.strip() for line in req_section.group(1).split('\n') if line.strip()]
+                for line in req_lines:
+                    if '|' in line:  # Table format
+                        parts = [part.strip() for part in line.split('|')]
+                        if len(parts) >= 6:
+                            code = parts[0]
+                            description = parts[1]
+                            required = float(parts[2]) if parts[2] and parts[2].replace('.','').isdigit() else 0.0
+                            waived = float(parts[3]) if parts[3] and parts[3].replace('.','').isdigit() else 0.0
+                            completed = float(parts[4]) if parts[4] and parts[4].replace('.','').isdigit() else 0.0
+                            status = parts[5]
+
+                            # Extract percentage if available
+                            percent = 0.0
+                            percent_match = re.search(r"(\d+)%", status)
+                            if percent_match:
+                                percent = float(percent_match.group(1))
+
+                            parsed_data['requirements'][code] = {
+                                "description": description,
+                                "required": required,
+                                "waived": waived,
+                                "completed": completed,
+                                "percent_complete": percent,
+                                "status": status
+                            }
 
+            # Extract assessments
+            assess_section = re.search(r"Z-Assessment.*?\n(.*?)(?:\n\s*\n|$)", text, re.DOTALL)
+            if assess_section:
+                assess_lines = [line.strip() for line in assess_section.group(1).split('\n') if line.strip()]
+                for line in assess_lines:
+                    if '|' in line:
+                        parts = [part.strip() for part in line.split('|')]
+                        if len(parts) >= 5 and parts[0].startswith('Z-'):
+                            name = parts[0].replace('Z-', '').strip()
+                            status = parts[4]
+                            parsed_data['assessments'][name] = status
 
-            # Extract course history
+            # Extract course history - specific to this format
+            course_section = re.search(r"Requirement\s*School Year\s*GradeLv1\s*CrsNum\s*Description\s*Term\s*DstNumber\s*FG\s*Incl\s*Credits(.*?)(?:\n\s*\n|$)", text, re.DOTALL)
+            if course_section:
+                course_lines = [line.strip() for line in course_section.group(1).split('\n') if line.strip()]
                 for line in course_lines:
+                    if '|' in line:
+                        parts = [part.strip() for part in line.split('|')]
+                        if len(parts) >= 9:
+                            course = {
+                                'requirement': parts[0],
+                                'school_year': parts[1],
+                                'grade_level': parts[2],
+                                'course_code': parts[3],
+                                'description': parts[4],
+                                'term': parts[5],
+                                'district_number': parts[6],
+                                'fg': parts[7],
+                                'included': parts[8],
+                                'credits': parts[9] if len(parts) > 9 else "0"
+                            }
+                            # Handle inProgress credits
+                            if "inProgress" in course['credits'].lower():
+                                course['credits'] = "0"
+                            parsed_data['course_history'].append(course)
 
             return parsed_data
 
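To show what the course-history block expects from the table extraction upstream, here is a hedged walk-through of the same split logic on a single row. The sample values are invented; only the column order comes from the hunk above.

# Invented sample row in the " | "-joined form produced by extract_text_from_file.
sample_row = "A-ENGLISH | 2023-2024 | 09 | 1001310 | ENG 1 | S1 | 7071 | A | Y | 0.50"

parts = [part.strip() for part in sample_row.split('|')]
if len(parts) >= 9:
    course = {
        'requirement': parts[0],
        'school_year': parts[1],
        'grade_level': parts[2],
        'course_code': parts[3],
        'description': parts[4],
        'term': parts[5],
        'district_number': parts[6],
        'fg': parts[7],
        'included': parts[8],
        'credits': parts[9] if len(parts) > 9 else "0",
    }
    print(course['credits'])  # "0.50" for this sample; in-progress rows fall back to "0"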