dmitrynovikov2121 committed on
Commit f280e03 · verified · 1 Parent(s): d6367a6

Update app.py

Files changed (1):
  1. app.py +124 -235
app.py CHANGED
@@ -1,223 +1,11 @@
- # Copyright (c) Opendatalab. All rights reserved.
-
- import base64
- import json
- import os
- import time
- import zipfile
- from pathlib import Path
- import re
- import uuid
- import pymupdf
- from io import BytesIO
- from fastapi import FastAPI, File, UploadFile
- from fastapi.responses import JSONResponse
- import uvicorn
- from PyPDF2 import PdfReader
-
- # Initialize FastAPI app
- app = FastAPI()
-
- # Setup and installation commands
- os.system('pip uninstall -y magic-pdf')
- os.system('pip install git+https://github.com/opendatalab/MinerU.git@dev')
- os.system('wget https://github.com/opendatalab/MinerU/raw/dev/scripts/download_models_hf.py -O download_models_hf.py')
- os.system('python download_models_hf.py')
-
- # Configure magic-pdf settings
- with open('/home/user/magic-pdf.json', 'r') as file:
-     data = json.load(file)
-
- data['device-mode'] = "cuda"
- if os.getenv('apikey'):
-     data['llm-aided-config']['title_aided']['api_key'] = os.getenv('apikey')
-     data['llm-aided-config']['title_aided']['enable'] = True
-
- with open('/home/user/magic-pdf.json', 'w') as file:
-     json.dump(data, file, indent=4)
-
- os.system('cp -r paddleocr /home/user/.paddleocr')
-
- # Import required modules
- from magic_pdf.data.data_reader_writer import FileBasedDataReader
- from magic_pdf.libs.hash_utils import compute_sha256
- from magic_pdf.tools.common import do_parse, prepare_env
- from loguru import logger
-
- def read_fn(path):
-     disk_rw = FileBasedDataReader(os.path.dirname(path))
-     return disk_rw.read(os.path.basename(path))
-
- def read_fn(path):
-     disk_rw = FileBasedDataReader(os.path.dirname(path))
-     return disk_rw.read(os.path.basename(path))
-
-
- def parse_pdf(doc_path, output_dir, end_page_id, is_ocr, layout_mode, formula_enable, table_enable, language):
-     os.makedirs(output_dir, exist_ok=True)
-
-     try:
-         file_name = f"{str(Path(doc_path).stem)}_{time.time()}"
-         pdf_data = read_fn(doc_path)
-         if is_ocr:
-             parse_method = "ocr"
-         else:
-             parse_method = "auto"
-         local_image_dir, local_md_dir = prepare_env(output_dir, file_name, parse_method)
-         do_parse(
-             output_dir,
-             file_name,
-             pdf_data,
-             [],
-             parse_method,
-             False,
-             end_page_id=end_page_id,
-             layout_model=layout_mode,
-             formula_enable=formula_enable,
-             table_enable=table_enable,
-             lang=language,
-             f_dump_orig_pdf=False,
-         )
-         return local_md_dir, file_name
-     except Exception as e:
-         logger.exception(e)
-
-
- def compress_directory_to_zip(directory_path, output_zip_path):
-     """
-     Compress the specified directory into a ZIP file.
-
-     :param directory_path: path of the directory to compress
-     :param output_zip_path: path of the output ZIP file
-     """
-     try:
-         with zipfile.ZipFile(output_zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
-             # Walk all files and subdirectories under the directory
-             for root, dirs, files in os.walk(directory_path):
-                 for file in files:
-                     # Build the full file path
-                     file_path = os.path.join(root, file)
-                     # Compute the relative path
-                     arcname = os.path.relpath(file_path, directory_path)
-                     # Add the file to the ZIP archive
-                     zipf.write(file_path, arcname)
-         return 0
-     except Exception as e:
-         logger.exception(e)
-         return -1
-
-
- def image_to_base64(image_path):
-     with open(image_path, "rb") as image_file:
-         return base64.b64encode(image_file.read()).decode('utf-8')
-
-
- def replace_image_with_base64(markdown_text, image_dir_path):
-     # Match image tags in the Markdown
-     pattern = r'\!\[(?:[^\]]*)\]\(([^)]+)\)'
-
-     # Replace image links
-     def replace(match):
-         relative_path = match.group(1)
-         full_path = os.path.join(image_dir_path, relative_path)
-         base64_image = image_to_base64(full_path)
-         return f"![{relative_path}](data:image/jpeg;base64,{base64_image})"
-
-     # Apply the replacement
-     return re.sub(pattern, replace, markdown_text)
-
-
- def to_markdown(file_path, end_pages, is_ocr, layout_mode, formula_enable, table_enable, language):
-     file_path = to_pdf(file_path)
-     if end_pages > 20:
-         end_pages = 20
-     # Get the recognized md file and the zip archive path
-     local_md_dir, file_name = parse_pdf(file_path, './output', end_pages - 1, is_ocr,
-                                         layout_mode, formula_enable, table_enable, language)
-     archive_zip_path = os.path.join("./output", compute_sha256(local_md_dir) + ".zip")
-     zip_archive_success = compress_directory_to_zip(local_md_dir, archive_zip_path)
-     if zip_archive_success == 0:
-         logger.info("Compression succeeded")
-     else:
-         logger.error("Compression failed")
-     md_path = os.path.join(local_md_dir, file_name + ".md")
-     with open(md_path, 'r', encoding='utf-8') as f:
-         txt_content = f.read()
-     md_content = replace_image_with_base64(txt_content, local_md_dir)
-     # Return the path of the converted PDF
-     new_pdf_path = os.path.join(local_md_dir, file_name + "_layout.pdf")
-
-     return md_content, txt_content, archive_zip_path, new_pdf_path
-
-
- latex_delimiters = [{"left": "$$", "right": "$$", "display": True},
-                     {"left": '$', "right": '$', "display": False}]
-
-
- def init_model():
-     from magic_pdf.model.doc_analyze_by_custom_model import ModelSingleton
-     try:
-         model_manager = ModelSingleton()
-         txt_model = model_manager.get_model(False, False)
-         logger.info("txt_model init final")
-         ocr_model = model_manager.get_model(True, False)
-         logger.info("ocr_model init final")
-         return 0
-     except Exception as e:
-         logger.exception(e)
-         return -1
-
-
- model_init = init_model()
- logger.info(f"model_init: {model_init}")
-
-
- with open("header.html", "r") as file:
-     header = file.read()
-
-
- latin_lang = [
-     'af', 'az', 'bs', 'cs', 'cy', 'da', 'de', 'es', 'et', 'fr', 'ga', 'hr',
-     'hu', 'id', 'is', 'it', 'ku', 'la', 'lt', 'lv', 'mi', 'ms', 'mt', 'nl',
-     'no', 'oc', 'pi', 'pl', 'pt', 'ro', 'rs_latin', 'sk', 'sl', 'sq', 'sv',
-     'sw', 'tl', 'tr', 'uz', 'vi', 'french', 'german'
- ]
- arabic_lang = ['ar', 'fa', 'ug', 'ur']
- cyrillic_lang = [
-     'ru', 'rs_cyrillic', 'be', 'bg', 'uk', 'mn', 'abq', 'ady', 'kbd', 'ava',
-     'dar', 'inh', 'che', 'lbe', 'lez', 'tab'
- ]
- devanagari_lang = [
-     'hi', 'mr', 'ne', 'bh', 'mai', 'ang', 'bho', 'mah', 'sck', 'new', 'gom',
-     'sa', 'bgc'
- ]
- other_lang = ['ch', 'en', 'korean', 'japan', 'chinese_cht', 'ta', 'te', 'ka']
-
- all_lang = ['', 'auto']
- all_lang.extend([*other_lang, *latin_lang, *arabic_lang, *cyrillic_lang, *devanagari_lang])
-
-
- def to_pdf(file_path):
-     with pymupdf.open(file_path) as f:
-         if f.is_pdf:
-             return file_path
-         else:
-             pdf_bytes = f.convert_to_pdf()
-             # Write the PDF bytes to a uniquely named file
-             unique_filename = f"{uuid.uuid4()}.pdf"
-
-             # Build the full file path
-             tmp_file_path = os.path.join(os.path.dirname(file_path), unique_filename)
-
-             # Write the byte data to the file
-             with open(tmp_file_path, 'wb') as tmp_pdf_file:
-                 tmp_pdf_file.write(pdf_bytes)
-
-             return tmp_file_path
-
-
+ # Add these imports
+ from pdfminer.high_level import extract_text
+ from pdfminer.layout import LAParams
+ import fitz  # PyMuPDF
+ from transformers import LayoutLMv3Processor, LayoutLMv3ForSequenceClassification
+ import torch
+ from PIL import Image
+ import numpy as np
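+ # NOTE: these extractors assume pdfminer.six, PyMuPDF, transformers, torch,
+ # and Pillow are present in the Space image (they are not pinned here);
+ # numpy is imported but never used below.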
 
  @app.post("/process_document")
  async def process_document(
@@ -230,13 +18,12 @@ async def process_document(
      language: str = "auto"
  ):
      try:
-         # Save uploaded file temporarily
          temp_path = f"/tmp/{file.filename}"
          with open(temp_path, "wb") as buffer:
              content = await file.read()
              buffer.write(content)
-
-         # Source 1: Using magic-pdf processing
+
+         # Source 1: magic-pdf processing
          md_content, txt_content, archive_zip_path, new_pdf_path = to_markdown(
              temp_path,
              end_pages=end_pages,
@@ -248,7 +35,7 @@ async def process_document(
          )
          source_1 = txt_content

-         # Source 2: Using PyPDF2
+         # Source 2: PyPDF2
          def extract_text_from_pdf(doc_path):
              try:
                  reader = PdfReader(doc_path)
@@ -258,24 +45,126 @@ async def process_document(
                  return str(e)

          source_2 = extract_text_from_pdf(temp_path)
-
+
+         # Source 3: PDFMiner
+         def extract_text_pdfminer(pdf_path):
+             try:
+                 laparams = LAParams(
+                     line_margin=0.5,
+                     word_margin=0.1,
+                     char_margin=2.0,
+                     boxes_flow=0.5,
+                     detect_vertical=True
+                 )
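+                 # NOTE: these LAParams values are heuristic guesses, not tuned
+                 # settings. Unlike sources 4 and 5, extract_text below reads
+                 # every page; pdfminer's maxpages argument could cap it at end_pages.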
+                 text = extract_text(pdf_path, laparams=laparams)
+                 return text
+             except Exception as e:
+                 return str(e)
+
+         source_3 = extract_text_pdfminer(temp_path)
+
+         # Source 4: PyMuPDF (more precise for tables and structured content)
+         def extract_text_pymupdf(pdf_path):
+             try:
+                 doc = fitz.open(pdf_path)
+                 text = ""
+                 for page_num in range(min(end_pages, doc.page_count)):
+                     page = doc[page_num]
+                     # Extract text with preserved formatting
+                     blocks = page.get_text("blocks")
+                     # Sort blocks by vertical position, then horizontal
+                     blocks.sort(key=lambda b: (b[1], b[0]))
+                     for b in blocks:
+                         text += b[4] + "\n"
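+                         # Each block is (x0, y0, x1, y1, text, block_no, block_type):
+                         # b[4] is the text; b[1]/b[0] are the top-left corner used for sorting.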
+                 doc.close()
+                 return text
+             except Exception as e:
+                 return str(e)
+
+         source_4 = extract_text_pymupdf(temp_path)
+
+         # Source 5: LayoutLMv3 for structured document understanding
+         def extract_text_layoutlm(pdf_path):
+             try:
+                 # Initialize LayoutLMv3
+                 processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
+                 model = LayoutLMv3ForSequenceClassification.from_pretrained("microsoft/layoutlmv3-base")
+
+                 # Convert PDF pages to images
+                 doc = fitz.open(pdf_path)
+                 text_results = []
+
+                 for page_num in range(min(end_pages, doc.page_count)):
+                     page = doc[page_num]
+                     pix = page.get_pixmap()
+                     img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
+
+                     # Process image through LayoutLMv3
+                     encoding = processor(img, return_tensors="pt")
+                     with torch.no_grad():
+                         outputs = model(**encoding)
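+                     # NOTE: `outputs` is never used below; the classification head
+                     # contributes no text, so this forward pass is pure overhead as
+                     # written. The processor also runs Tesseract OCR by default
+                     # (apply_ocr=True), which must be installed in the image.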
+
+                     # Extract text with layout information
+                     text = page.get_text("dict")
+                     blocks = text["blocks"]
+                     structured_text = ""
+                     for block in blocks:
+                         if "lines" in block:
+                             for line in block["lines"]:
+                                 if "spans" in line:
+                                     for span in line["spans"]:
+                                         structured_text += span["text"] + " "
+                     text_results.append(structured_text)
+
+                 doc.close()
+                 return "\n".join(text_results)
+             except Exception as e:
+                 return str(e)
+
+         source_5 = extract_text_layoutlm(temp_path)
+
          # Clean up
          os.remove(temp_path)
+
+         # Compare and validate results
+         def validate_results(sources):
+             # Basic validation checks
+             validated_results = {}
+             for idx, source in sources.items():
+                 # Check for common banking keywords
+                 banking_keywords = ['balance', 'deposit', 'withdrawal', 'transaction', 'account']
+                 keyword_presence = sum(1 for keyword in banking_keywords if keyword.lower() in source.lower())
+
+                 # Check for number patterns (amounts)
+                 amount_pattern = r'\$?\d{1,3}(?:,\d{3})*(?:\.\d{2})?'
+                 amounts_found = len(re.findall(amount_pattern, source))
+
+                 # Check for date patterns
+                 date_pattern = r'\d{1,2}[-/]\d{1,2}[-/]\d{2,4}'
+                 dates_found = len(re.findall(date_pattern, source))
+
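+                 # NOTE: this confidence score is an ad-hoc heuristic: it is not
+                 # normalized (it can exceed 1.0), and the amount regex also matches
+                 # bare numbers, so treat it as a rough relative signal only.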
+                 validated_results[idx] = {
+                     'text': source,
+                     'confidence_score': (keyword_presence + amounts_found + dates_found) / 10,
+                     'amounts_found': amounts_found,
+                     'dates_found': dates_found
+                 }
+             return validated_results
+
+         validated_sources = validate_results({
+             'source_1': source_1,
+             'source_2': source_2,
+             'source_3': source_3,
+             'source_4': source_4,
+             'source_5': source_5
+         })

          return JSONResponse({
-             "source_1": source_1,
-             "source_2": source_2
+             "sources": validated_sources
          })

      except Exception as e:
          return JSONResponse(
              status_code=500,
              content={"error": str(e)}
-         )
-
- # Initialize models
- model_init = init_model()
- logger.info(f"model_init: {model_init}")
-
- if __name__ == "__main__":
-     uvicorn.run(app, host="0.0.0.0", port=7860)
+         )