TKgumi committed · verified
Commit 9e1bf97 · Parent: 18026e1

Update app.py

Files changed (1):
  app.py (+42 −46)
app.py CHANGED
@@ -1,56 +1,52 @@
 import os
+import numpy as np
 import cv2
 import pytesseract
 from pdf2image import convert_from_path
+from fastapi import FastAPI, UploadFile, File
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-# --- 1. Extract text from the earnings-report PDF with OCR ---
-# PDF file name
-pdf_path = "kessan.pdf"
-
-# Convert the PDF to images (one list entry per page)
-images = convert_from_path(pdf_path)
-
-# Tesseract path setting (change to match your environment if needed)
-# Example for Windows:
-# pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
-
-extracted_text = ""
-
-for i, image in enumerate(images):
-    # Convert the Pillow Image object to OpenCV format
-    image_cv = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
-
-    # Preprocessing: grayscale plus OTSU binarization; add denoising etc. as needed
-    gray = cv2.cvtColor(image_cv, cv2.COLOR_BGR2GRAY)
-    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
-    # OCR (pass lang="jpn" for Japanese)
-    text = pytesseract.image_to_string(thresh, lang="jpn")
-    extracted_text += text + "\n"
-
-print("OCR抽出完了。")  # "OCR extraction complete."
-
-# --- 2. Feed the extracted text to the LLM and generate a summary ---
-# Example use of DeepSeek-Coder-1.3B, a distilled LLM on Hugging Face
-model_name = "deepseek-ai/deepseek-coder-1.3b"
-
-# Load the model and tokenizer
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name)
-
-# Build the prompt (adjust as needed)
-prompt = (
-    "以下の決算短信の内容を要約し、投資家向けに分かりやすく説明してください:\n\n" +  # "Summarize this earnings report and explain it clearly for investors:"
-    extracted_text
-)
-
-# Tokenize
-inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=4096)
-
-# Generate (adjust max_length and other parameters as needed)
-output_ids = model.generate(inputs.input_ids, max_length=512, num_beams=5, early_stopping=True)
-summary = tokenizer.decode(output_ids[0], skip_special_tokens=True)
-
-print("\n=== 要約結果 ===")  # "=== Summary ==="
-print(summary)
-
+app = FastAPI()
+
+# Hugging Face LLM configuration
+MODEL_NAME = "deepseek-ai/deepseek-coder-1.3b"
+tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
+
+# OCR helper (earnings-report PDF -> text)
+def extract_text_from_pdf(pdf_path: str) -> str:
+    images = convert_from_path(pdf_path)
+    extracted_text = ""
+
+    for image in images:
+        img_cv = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
+        gray = cv2.cvtColor(img_cv, cv2.COLOR_BGR2GRAY)
+        thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
+        text = pytesseract.image_to_string(thresh, lang="jpn")
+        extracted_text += text + "\n"
+
+    return extracted_text
+
+# LLM summarization helper (OCR text -> investor-facing summary)
+def summarize_text(text: str) -> str:
+    prompt = f"以下の決算短信を投資家向けに要約してください:\n{text}"  # "Summarize the following earnings report for investors:"
+    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=4096)
+    output_ids = model.generate(inputs.input_ids, max_length=512, num_beams=5, early_stopping=True)
+    return tokenizer.decode(output_ids[0], skip_special_tokens=True)
+
+# FastAPI endpoint (file upload -> OCR -> LLM summary)
+@app.post("/upload/")
+async def upload_pdf(file: UploadFile = File(...)):
+    file_path = f"/tmp/{file.filename}"
+
+    # Save the uploaded file
+    with open(file_path, "wb") as f:
+        f.write(await file.read())
+
+    # Extract text with OCR
+    extracted_text = extract_text_from_pdf(file_path)
+
+    # Summarize with the LLM
+    summary = summarize_text(extracted_text)
+
+    return {"summary": summary}
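
A note for anyone reusing summarize_text: in transformers, the max_length argument to generate() bounds prompt and output tokens combined, so a prompt near the 4096-token truncation limit leaves no room for a 512-token summary. A minimal alternative sketch, not part of this commit, bounds only the newly generated tokens:

# Sketch only: cap the summary length independently of the prompt length.
# max_new_tokens is the standard transformers generation argument for this.
output_ids = model.generate(
    inputs.input_ids,
    max_new_tokens=512,
    num_beams=5,
    early_stopping=True,
)

Separately, the checkpoint id "deepseek-ai/deepseek-coder-1.3b" is kept verbatim from the source; the Hub appears to publish this family as deepseek-coder-1.3b-base and deepseek-coder-1.3b-instruct, so the bare name may need adjusting.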
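The OCR path also depends on system binaries that pip does not install: pdf2image shells out to the poppler utilities, and pytesseract needs the tesseract binary plus Japanese language data for lang="jpn". On Debian/Ubuntu (or in a Hugging Face Space's packages.txt, assuming that convention applies to this repo), that is roughly:

poppler-utils       # provides pdftoppm, used by pdf2image's convert_from_path
tesseract-ocr       # the Tesseract binary pytesseract invokes
tesseract-ocr-jpn   # Japanese traineddata required by lang="jpn"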
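To exercise the new endpoint end to end, here is a minimal client sketch; BASE_URL and report.pdf are placeholders, not part of the repository (e.g. run the server locally with uvicorn app:app --port 8000):

# client.py -- hypothetical client for the /upload/ endpoint added in this commit.
import requests

BASE_URL = "http://localhost:8000"  # placeholder; point at the deployed app

with open("report.pdf", "rb") as f:  # report.pdf: any earnings-report PDF
    resp = requests.post(
        f"{BASE_URL}/upload/",
        files={"file": ("report.pdf", f, "application/pdf")},
    )

resp.raise_for_status()
print(resp.json()["summary"])

Note that upload_pdf writes to /tmp/{file.filename} using the client-supplied name, so sanitizing the filename before building the path would be prudent outside a demo.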