import os
import re
import json
import gradio as gr
import pandas as pd
import pdfplumber
import pytesseract
from pdf2image import convert_from_path
from huggingface_hub import InferenceClient
# Initialize Hugging Face Inference Client with a free model
hf_token = os.getenv("HF_TOKEN")
client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta", token=hf_token)
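
# Note: HF_TOKEN is read from the environment (e.g. a Space secret). Without a
# token the free Inference API may rate-limit or reject requests, and the model
# above may need to be swapped for one the serverless API currently hosts.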

def extract_excel_data(file_path):
    """Extract text from Excel file"""
    df = pd.read_excel(file_path, engine='openpyxl')
    return df.to_string(index=False)

def extract_text_from_pdf(pdf_path, is_scanned=False):
    """Extract text from PDF with fallback OCR"""
    if not is_scanned:
        try:
            # Try native PDF text extraction first
            with pdfplumber.open(pdf_path) as pdf:
                text = ""
                for page in pdf.pages:
                    # extract_text() returns None for pages without a text layer
                    text += (page.extract_text() or "") + "\n"
            if text.strip():
                return text
            print("Native PDF extraction returned no text; falling back to OCR")
        except Exception as e:
            print(f"Native PDF extraction failed: {str(e)}")
    # OCR path for scanned PDFs (or when native extraction yields nothing)
    images = convert_from_path(pdf_path, dpi=200)
    text = ""
    for image in images:
        text += pytesseract.image_to_string(image) + "\n"
    return text
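
# Note: pdf2image needs the poppler utilities and pytesseract needs the
# tesseract-ocr binary installed on the system; on Debian-based images these
# typically come from `apt-get install poppler-utils tesseract-ocr`
# (declared in packages.txt on Spaces).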

def parse_bank_statement(text):
    """Parse bank statement using LLM with fallback to rule-based parser"""
    # Strip control characters that would corrupt the prompt or JSON parsing
    cleaned_text = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f\x7f]', '', text)
    print(f"Original text sample: {cleaned_text[:200]}...")
    # Craft a precise prompt for the LLM; braces in the JSON example are
    # doubled so the f-string leaves them intact
    prompt = f"""
You are a financial data parser. Extract transactions from bank statements.
Given this bank statement text:
{cleaned_text}
Extract all transactions with these fields:
- Date
- Description
- Amount
- Debit
- Credit
- Closing Balance
- Category
Return JSON with "transactions" array containing these fields.
Example format:
{{
"transactions": [
{{
"date": "2025-05-08",
"description": "Company XYZ Payroll",
"amount": "8315.40",
"debit": "0.00",
"credit": "8315.40",
"closing_balance": "38315.40",
"category": "Salary"
}},
{{
"date": "2025-05-19",
"description": "Whole Foods",
"amount": "142.21",
"debit": "142.21",
"credit": "0.00",
"closing_balance": "38173.19",
"category": "Groceries"
}}
]
}}
Rules:
1. Ensure numeric fields have valid numbers (e.g., "0.00" instead of "-")
2. Convert negative balances to standard format (e.g., "-2421.72")
3. Map category names consistently (e.g., "Groceries", "Medical", "Utilities")
4. Only return valid JSON with no additional text
"""
try:
# Call LLM via Hugging Face Inference API
response = client.text_generation(
prompt,
max_new_tokens=2000,
temperature=0.1,
stop_sequences=["</s>"]
)
print(f"LLM Response: {response}")
# Extract JSON from response (remove non-JSON prefixes/suffixes)
json_match = re.search(r'\{.*\}', response, re.DOTALL)
if json_match:
return json.loads(json_match.group())
return json.loads(response)
except Exception as e:
print(f"LLM Error: {str(e)}")
# Fallback to rule-based parser
return rule_based_parser(cleaned_text)
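
# Both the LLM path and the rule-based fallback return the same shape:
#   {"transactions": [{"date": "2025-05-19", "description": "Whole Foods",
#                      "amount": "142.21", "debit": "142.21", "credit": "0.00",
#                      "closing_balance": "38173.19", "category": "Groceries"}]}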

def rule_based_parser(text):
    """Fallback parser for structured tables with pipe delimiters"""
    lines = [line.strip() for line in text.split('\n') if line.strip()]
    # Find the header line containing a pipe-delimited 'Date' column
    # (allowing optional whitespace around the delimiter)
    header_index = None
    for i, line in enumerate(lines):
        if re.search(r'\|\s*Date|Date\s*\|', line, re.IGNORECASE):
            header_index = i
            break
    if header_index is None or header_index + 1 >= len(lines):
        return {"transactions": []}
    data_lines = lines[header_index + 1:]
    transactions = []
    for line in data_lines:
        if '|' not in line:
            continue
        parts = [p.strip() for p in line.split('|') if p.strip()]
        if len(parts) < 7:
            continue
        try:
            # Normalize numeric values so all rows are formatted consistently
            transactions.append({
                "date": parts[0],
                "description": parts[1],
                "amount": format_number(parts[2]),
                "debit": format_number(parts[3]),
                "credit": format_number(parts[4]),
                "closing_balance": format_number(parts[5]),
                "category": parts[6]
            })
        except Exception as e:
            print(f"Error parsing line: {str(e)}")
    return {"transactions": transactions}
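
# Illustrative input the fallback parser expects (seven pipe-delimited fields):
#   Date       | Description | Amount | Debit  | Credit | Closing Balance | Category
#   2025-05-19 | Whole Foods | 142.21 | 142.21 | 0.00   | 38173.19        | Groceries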

def format_number(value):
    """Format numeric values consistently"""
    value = value.replace(',', '')
    if re.match(r'^-?\d+(\.\d+)?$', value):
        return f"{float(value):.2f}"
    return value
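
# Illustrative behaviour:
#   format_number("1,234.5") -> "1234.50"
#   format_number("-")       -> "-"   (non-numeric values pass through unchanged)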

def process_file(file, is_scanned):
    """Main processing function"""
    empty_df = pd.DataFrame(columns=[
        "Date", "Description", "Amount", "Debit",
        "Credit", "Closing Balance", "Category"
    ])
    if not file:
        return empty_df
    file_path = file.name
    file_ext = os.path.splitext(file_path)[1].lower()
    try:
        if file_ext == '.xlsx':
            text = extract_excel_data(file_path)
        elif file_ext == '.pdf':
            text = extract_text_from_pdf(file_path, is_scanned=is_scanned)
        else:
            return empty_df
        parsed_data = parse_bank_statement(text)
        df = pd.DataFrame(parsed_data["transactions"])
        # Ensure all required columns exist, then select them in a fixed order
        # so the positional rename below cannot mislabel out-of-order LLM output
        required_cols = ["date", "description", "amount", "debit",
                         "credit", "closing_balance", "category"]
        for col in required_cols:
            if col not in df.columns:
                df[col] = ""
        df = df[required_cols]
        df.columns = ["Date", "Description", "Amount", "Debit",
                      "Credit", "Closing Balance", "Category"]
        return df
    except Exception as e:
        print(f"Processing error: {str(e)}")
        # Return an empty DataFrame with the correct columns on error
        return empty_df

# Gradio Interface
interface = gr.Interface(
    fn=process_file,
    inputs=[
        gr.File(label="Upload Bank Statement (PDF/Excel)"),
        gr.Checkbox(label="Is Scanned PDF? (Use OCR)")
    ],
    outputs=gr.Dataframe(
        label="Parsed Transactions",
        headers=["Date", "Description", "Amount", "Debit", "Credit", "Closing Balance", "Category"]
    ),
    title="AI Bank Statement Parser",
    description="Extract structured transaction data from PDF/Excel bank statements using LLM and hybrid parsing techniques.",
    allow_flagging="never"
)

if __name__ == "__main__":
    interface.launch()