Spaces:
Sleeping
Sleeping
File size: 8,824 Bytes
f330df4 27a375e f330df4 27a375e f330df4 ff610ff 27a375e fd970b6 27a375e fd970b6 f330df4 ff610ff 27a375e ff610ff 27a375e f330df4 27a375e ff610ff 27a375e f330df4 ff610ff f330df4 ff610ff 27a375e ff610ff f330df4 27a375e ff610ff 471f1d3 ff610ff 6255a6d f330df4 6255a6d 52ebfdc 6255a6d 52ebfdc 6255a6d 15c9ede 27a375e 6255a6d 27a375e 6255a6d 27a375e fd970b6 aca59c0 fd970b6 aca59c0 6255a6d 27a375e 6255a6d 27a375e ff610ff 6255a6d 27a375e 6255a6d 27a375e 6255a6d 27a375e ff610ff 27a375e 6255a6d 27a375e 6255a6d 27a375e ff610ff 27a375e aca59c0 27a375e ff610ff 27a375e ff610ff f330df4 aca59c0 6255a6d aca59c0 f330df4 27a375e 4cfc47d 27a375e f330df4 ff610ff 27a375e 4cfc47d 27a375e 4cfc47d 6255a6d 4cfc47d 27a375e 4cfc47d 27a375e 4cfc47d f330df4 27a375e f330df4 27a375e f330df4 27a375e 6255a6d 27a375e 6255a6d ff610ff f330df4 ff610ff |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 |
import os
import re
import json
import gradio as gr
import pandas as pd
import pdfplumber
import pytesseract
from pdf2image import convert_from_path
from huggingface_hub import InferenceClient
# Initialize with a reliable free model that supports text-generation
# HF_TOKEN is read from the environment (a Spaces secret); InferenceClient
# calls the hosted HF Inference API — the model is never loaded locally.
hf_token = os.getenv("HF_TOKEN")
client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.2", token=hf_token)
def extract_excel_data(file_path):
    """Load an Excel workbook and render its contents as a plain-text table.

    Args:
        file_path: Path to an .xlsx file readable by openpyxl.

    Returns:
        The sheet rendered as a whitespace-aligned string without the
        row index, suitable for feeding to the statement parser.
    """
    workbook = pd.read_excel(file_path, engine='openpyxl')
    rendered = workbook.to_string(index=False)
    return rendered
def extract_text_from_pdf(pdf_path, is_scanned=False):
    """Extract text from a PDF, using OCR for scanned documents.

    Tries native text extraction (pdfplumber) first; falls back to OCR
    (pdf2image + pytesseract) when the caller marks the PDF as scanned,
    when native extraction fails, or when it yields no text.

    Args:
        pdf_path: Path to the PDF file.
        is_scanned: When True, skip native extraction and go straight to OCR.
            (Previously this flag was accepted but ignored.)

    Returns:
        Extracted text, one "\\n" appended per page.
    """
    if not is_scanned:
        try:
            with pdfplumber.open(pdf_path) as pdf:
                # extract_text() returns None for pages with no text layer;
                # coalesce to "" so a single image page cannot raise TypeError.
                text = "".join((page.extract_text() or "") + "\n" for page in pdf.pages)
            if text.strip():
                return text
            print("Native PDF extraction produced no text; falling back to OCR")
        except Exception as e:
            print(f"Native PDF extraction failed: {str(e)}")
    # OCR path: rasterize each page, then run Tesseract on the images.
    images = convert_from_path(pdf_path, dpi=200)
    return "".join(pytesseract.image_to_string(image) + "\n" for image in images)
def parse_bank_statement(text):
    """Parse a bank statement into structured transaction data.

    Primary path: prompt the hosted LLM for a strict JSON object, then
    salvage/validate the JSON from its reply. On any failure (API error,
    malformed JSON, missing "transactions" key) fall back to the
    rule-based table parser.

    Args:
        text: Raw statement text from PDF/Excel extraction.

    Returns:
        dict with a "transactions" list of per-transaction dicts
        (keys: date, description, amount, debit, credit,
        closing_balance, category).
    """
    # Strip ASCII control characters that would corrupt the prompt or
    # make the model's JSON output unparseable.
    cleaned_text = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f\x7f]', '', text)
    print(f"Original text sample: {cleaned_text[:200]}...")
    # Craft precise prompt with strict JSON formatting instructions
    # (Mistral-instruct style role tags; doubled braces escape literal
    # JSON braces inside the f-string).
    prompt = f"""
<|system|>
You are a financial data parser. Extract transactions from bank statements and return ONLY valid JSON.
</s>
<|user|>
Extract all transactions from this bank statement with these exact fields:
- date (format: YYYY-MM-DD)
- description
- amount (format: 0.00)
- debit (format: 0.00)
- credit (format: 0.00)
- closing_balance (format: 0.00 or -0.00 for negative)
- category
Statement text:
{cleaned_text[:3000]} [truncated if too long]
Return JSON with this exact structure:
{{
"transactions": [
{{
"date": "2025-05-08",
"description": "Company XYZ Payroll",
"amount": "8315.40",
"debit": "0.00",
"credit": "8315.40",
"closing_balance": "38315.40",
"category": "Salary"
}},
{{
"date": "2025-05-19",
"description": "Whole Foods",
"amount": "142.21",
"debit": "142.21",
"credit": "0.00",
"closing_balance": "38173.19",
"category": "Groceries"
}}
]
}}
RULES:
1. Output ONLY the JSON object with no additional text
2. Keep amounts as strings with 2 decimal places
3. For missing values, use empty strings
4. Convert negative amounts to format "-123.45"
5. Map categories to: Salary, Groceries, Medical, Utilities, Entertainment, Dining, Misc
</s>
<|assistant|>
"""
    try:
        # Call LLM via Hugging Face Inference API
        response = client.text_generation(
            prompt,
            max_new_tokens=2000,
            temperature=0.01,  # near-deterministic decoding for parsing
            stop_sequences=["</s>"]
        )
        print(f"LLM Response: {response}")
        # Validate and clean JSON response
        response = response.strip()
        if not response.startswith('{'):
            # Find the first { and last } to extract JSON
            # (models often wrap the object in prose despite instructions)
            start_idx = response.find('{')
            end_idx = response.rfind('}')
            if start_idx != -1 and end_idx != -1:
                response = response[start_idx:end_idx+1]
        # Parse JSON and validate structure
        data = json.loads(response)
        if "transactions" not in data:
            raise ValueError("Missing 'transactions' key in JSON")
        return data
    except Exception as e:
        print(f"LLM Error: {str(e)}")
        # Fallback to rule-based parser
        return rule_based_parser(cleaned_text)
def rule_based_parser(text):
    """Fallback parser that extracts transactions from tabular statement text.

    Supports two layouts: pipe-delimited tables and space-aligned columns
    (fields separated by 2+ spaces). A header row naming the seven expected
    fields (Date, Description, Amount, Debit, Credit, Closing Balance,
    Category) must precede the data rows.

    Args:
        text: Cleaned statement text.

    Returns:
        dict with a "transactions" list in the same schema as the LLM parser;
        empty list when no header row is found.
    """
    lines = [line.strip() for line in text.split('\n') if line.strip()]
    # Find header line - more flexible detection
    header_index = None
    header_patterns = [
        r'Date\b', r'Description\b', r'Amount\b',
        r'Debit\b', r'Credit\b', r'Closing\s*Balance\b', r'Category\b'
    ]
    for i, line in enumerate(lines):
        if all(re.search(pattern, line, re.IGNORECASE) for pattern in header_patterns):
            header_index = i
            break
    if header_index is None:
        # Try pipe-delimited format as fallback (partial header match)
        for i, line in enumerate(lines):
            if '|' in line and any(p in line for p in ['Date', 'Amount', 'Balance']):
                header_index = i
                break
    if header_index is None or header_index + 1 >= len(lines):
        return {"transactions": []}
    data_lines = lines[header_index + 1:]
    transactions = []
    for line in data_lines:
        # Handle both pipe-delimited and space-aligned formats
        if '|' in line:
            # Keep interior empty cells (e.g. a blank Debit column) so fields
            # stay aligned; drop only the empties produced by boundary pipes.
            # Previously interior empties were dropped, shifting all later
            # columns and silently discarding the row via the length guard.
            parts = [p.strip() for p in line.split('|')]
            if parts and parts[0] == '':
                parts = parts[1:]
            if parts and parts[-1] == '':
                parts = parts[:-1]
        else:
            # Space-aligned format - split by 2+ spaces
            parts = re.split(r'\s{2,}', line)
        if len(parts) < 7:
            continue
        try:
            transactions.append({
                "date": parts[0],
                "description": parts[1],
                "amount": format_number(parts[2]),
                "debit": format_number(parts[3]),
                "credit": format_number(parts[4]),
                "closing_balance": format_number(parts[5]),
                "category": parts[6]
            })
        except Exception as e:
            print(f"Error parsing line: {str(e)}")
    return {"transactions": transactions}
def format_number(value):
    """Normalize a monetary value to a plain "1234.56"-style string.

    Handles thousands separators, "$" symbols, accounting-style
    parentheses for negatives, and pads/truncates to two decimal places.
    Negative zero normalizes to "0.00".

    Args:
        value: Raw cell value (string; numeric values are coerced via str()).

    Returns:
        Normalized amount string, "0.00" for empty/falsy input.
    """
    if not value:
        return "0.00"
    # Clean numeric values: thousands separators, currency symbol, whitespace.
    value = str(value).replace(',', '').replace('$', '').strip()
    if not value:
        return "0.00"
    # Handle negative numbers in parentheses: (123.45) -> -123.45
    if '(' in value and ')' in value:
        value = '-' + value.replace('(', '').replace(')', '')
    # Track the sign separately so zero-stripping below cannot eat the
    # integer "0" of values like "-0.50" (old code returned "-.50").
    negative = value.startswith('-')
    if negative:
        value = value[1:]
    # Standardize decimal format
    if '.' not in value:
        value += '.00'
    # Ensure two decimal places, no leading zeros on the integer part
    parts = value.split('.')
    if len(parts) == 2:
        integer = parts[0].lstrip('0') or '0'
        decimal = parts[1][:2].ljust(2, '0')
        value = f"{integer}.{decimal}"
    # Re-attach the sign; normalize "-0.00" to "0.00"
    if negative and value != '0.00':
        return f"-{value}"
    return value
def process_file(file, is_scanned):
    """Main processing function: turn an uploaded statement into a DataFrame.

    Args:
        file: Uploaded file object (Gradio File) exposing a `.name` path,
            or None when nothing was uploaded.
        is_scanned: Whether to force OCR for PDF inputs.

    Returns:
        pandas.DataFrame with the seven display columns; empty (but with
        the correct columns) on missing input, unsupported extension, or
        any processing error.
    """
    display_cols = ["Date", "Description", "Amount", "Debit",
                    "Credit", "Closing Balance", "Category"]
    if not file:
        return pd.DataFrame(columns=display_cols)
    file_path = file.name
    file_ext = os.path.splitext(file_path)[1].lower()
    try:
        if file_ext == '.xlsx':
            text = extract_excel_data(file_path)
        elif file_ext == '.pdf':
            text = extract_text_from_pdf(file_path, is_scanned=is_scanned)
        else:
            # Unsupported file type
            return pd.DataFrame(columns=display_cols)
        parsed_data = parse_bank_statement(text)
        df = pd.DataFrame(parsed_data["transactions"])
        # Reindex guarantees all seven columns exist *in this order* (missing
        # ones filled with ""), so the positional rename below can never
        # mislabel out-of-order LLM output — the old in-place loop preserved
        # whatever column order the parser happened to return.
        required_cols = ["date", "description", "amount", "debit",
                         "credit", "closing_balance", "category"]
        df = df.reindex(columns=required_cols, fill_value="")
        df.columns = display_cols
        return df
    except Exception as e:
        print(f"Processing error: {str(e)}")
        # Return empty DataFrame with correct columns on error
        return pd.DataFrame(columns=display_cols)
# Gradio Interface
# Simple one-shot UI: file upload + OCR toggle in, parsed-transaction table out.
interface = gr.Interface(
    fn=process_file,
    inputs=[
        gr.File(label="Upload Bank Statement (PDF/Excel)"),
        gr.Checkbox(label="Is Scanned PDF? (Use OCR)")
    ],
    outputs=gr.Dataframe(
        label="Parsed Transactions",
        # Headers must match the column names process_file returns.
        headers=["Date", "Description", "Amount", "Debit", "Credit", "Closing Balance", "Category"],
        datatype=["date", "str", "number", "number", "number", "number", "str"]
    ),
    title="AI Bank Statement Parser",
    description="Extract structured transaction data from PDF/Excel bank statements",
    allow_flagging="never"  # hide Gradio's default flagging button
)
# Launch the web app when executed as a script (Hugging Face Spaces entry
# point). A stray " |" scrape artifact trailing the launch() call in the
# extracted source would have been a syntax error; removed.
if __name__ == "__main__":
    interface.launch()