Upload 4 files
- .gitattributes +38 -35
- README.md +14 -14
- app.py +182 -0
- requirements.txt +2 -0
.gitattributes
CHANGED
@@ -1,35 +1,38 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
+*.jpg filter=lfs diff=lfs merge=lfs -text
+*.jpeg filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -1,14 +1,14 @@
----
-title: RapidTableDetection
-emoji: 🏢
-colorFrom: indigo
-colorTo: blue
-sdk: gradio
-sdk_version: 5.6.0
-app_file: app.py
-pinned: false
-license: apache-2.0
-short_description: extract table form complex bg
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+---
+title: RapidTableDetection
+emoji: 🏢
+colorFrom: indigo
+colorTo: blue
+sdk: gradio
+sdk_version: 5.6.0
+app_file: app.py
+pinned: false
+license: apache-2.0
+short_description: extract table form complex bg
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,182 @@
+import gradio as gr
+import os
+import cv2
+from rapid_table_det.inference import TableDetector
+from rapid_table_det.utils.visuallize import img_loader, visuallize, extract_table_img
+
+example_images = [
+    "images/doc1.png",
+    "images/doc2.jpg",
+    "images/doc3.jpg",
+    "images/doc4.jpg",
+    "images/doc5.jpg",
+    "images/real1.jpg",
+    "images/real2.jpeg",
+    "images/real3.jpg",
+    "images/real4.jpg",
+    "images/real5.jpg"
+]
+
+# Model type options
+model_type_options = {
+    "YOLO object detection": ["yolo_obj_det"],
+    "Paddle object detection": ["paddle_obj_det"],
+    "Paddle object detection (quantized)": ["paddle_obj_det_s"],
+    "YOLO semantic segmentation": ["yolo_edge_det"],
+    "YOLO semantic segmentation (small)": ["yolo_edge_det_s"],
+    "Paddle semantic segmentation": ["paddle_edge_det"],
+    "Paddle semantic segmentation (quantized)": ["paddle_edge_det_s"],
+    "Paddle orientation classification": ["paddle_cls_det"]
+}
+
+# Pre-initialize every possible TableDetector combination
+preinitialized_detectors = {}
+
+for obj_model_type in model_type_options["YOLO object detection"] + model_type_options["Paddle object detection"] + model_type_options[
+        "Paddle object detection (quantized)"]:
+    for edge_model_type in model_type_options["YOLO semantic segmentation"] + model_type_options["YOLO semantic segmentation (small)"] + model_type_options[
+            "Paddle semantic segmentation"] + model_type_options["Paddle semantic segmentation (quantized)"]:
+        for cls_model_type in model_type_options["Paddle orientation classification"]:
+            detector_key = (obj_model_type, edge_model_type, cls_model_type)
+            preinitialized_detectors[detector_key] = TableDetector(
+                obj_model_type=obj_model_type,
+                edge_model_type=edge_model_type,
+                cls_model_type=cls_model_type,
+                obj_model_path=os.path.join("models", f"{obj_model_type}.onnx"),
+                edge_model_path=os.path.join("models", f"{edge_model_type}.onnx"),
+                cls_model_path=os.path.join("models", f"{cls_model_type}.onnx")
+            )
+
+
+# Image resizing helper
+def resize_image(image, max_size=640):
+    height, width = image.shape[:2]
+    if max(height, width) > max_size:
+        scale = max_size / max(height, width)
+        new_height = int(height * scale)
+        new_width = int(width * scale)
+        image = cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_AREA)
+    return image
+
+
+# Inference function
+def run_inference(img_path, obj_model_type, edge_model_type, cls_model_type, det_accuracy, use_obj_det, use_edge_det,
+                  use_cls_det):
+    detector_key = (obj_model_type, edge_model_type, cls_model_type)
+    table_det = preinitialized_detectors[detector_key]
+    result, elapse = table_det(
+        img_path,
+        det_accuracy=det_accuracy,
+        use_obj_det=use_obj_det,
+        use_edge_det=use_edge_det,
+        use_cls_det=use_cls_det
+    )
+
+    # Load the image
+    img = img_loader(img_path)
+    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+    extract_img = img.copy()
+    visual_img = img.copy()
+    extract_imgs = []
+
+    for i, res in enumerate(result):
+        box = res["box"]
+        lt, rt, rb, lb = res["lt"], res["rt"], res["rb"], res["lb"]
+        # Draw the detected box and the top-left orientation marker
+        visual_img = visuallize(visual_img, box, lt, rt, rb, lb)
+        # Extract the table image via perspective transform
+        wrapped_img = extract_table_img(extract_img.copy(), lt, rt, rb, lb)
+        extract_imgs.append(wrapped_img)
+    # Resize images for display
+    visual_img = resize_image(visual_img)
+    extract_imgs = [resize_image(img) for img in extract_imgs]
+
+    obj_det_elapse, edge_elapse, rotate_det_elapse = elapse
+    return visual_img, extract_imgs, f"obj_det_elapse:{obj_det_elapse}, edge_elapse={edge_elapse}, rotate_det_elapse={rotate_det_elapse}"
+
+
+def update_extract_outputs(visual_img, extract_imgs, time_info):
+    if len(extract_imgs) == 1:
+        return visual_img, extract_imgs[0], time_info
+    else:
+        return visual_img, extract_imgs, time_info
+
+
+# Build the Gradio interface
+with gr.Blocks(
+    css="""
+    .scrollable-container {
+        overflow-x: auto;
+        white-space: nowrap;
+    }
+    .header-links {
+        text-align: center;
+    }
+    .header-links a {
+        display: inline-block;
+        text-align: center;
+        margin-right: 10px; /* adjust spacing */
+    }
+    """
+) as demo:
+    gr.HTML(
+        "<h1 style='text-align: center;'><a href='https://github.com/RapidAI/RapidTableDetection'>RapidTableDetection</a></h1>"
+    )
+    gr.HTML('''
+    <div class="header-links">
+        <a href=""><img src="https://img.shields.io/badge/Python->=3.8,<3.12-aff.svg"></a>
+        <a href=""><img src="https://img.shields.io/badge/OS-Linux%2C%20Mac%2C%20Win-pink.svg"></a>
+        <a href="https://semver.org/"><img alt="SemVer2.0" src="https://img.shields.io/badge/SemVer-2.0-brightgreen"></a>
+        <a href="https://github.com/psf/black"><img src="https://img.shields.io/badge/code%20style-black-000000.svg"></a>
+        <a href="https://github.com/RapidAI/TableStructureRec/blob/c41bbd23898cb27a957ed962b0ffee3c74dfeff1/LICENSE"><img alt="GitHub" src="https://img.shields.io/badge/license-Apache 2.0-blue"></a>
+    </div>
+    ''')
+    with gr.Row():
+        with gr.Column(variant="panel", scale=1):
+            img_input = gr.Image(label="Upload or Select Image", sources="upload", value="images/real1.jpg")
+
+            # Example image selector
+            examples = gr.Examples(
+                examples=example_images,
+                examples_per_page=len(example_images),
+                inputs=img_input,
+                fn=lambda x: x,  # simply return the image path
+                outputs=img_input,
+                cache_examples=False
+            )
+
+            obj_model_type = gr.Dropdown(
+                choices=model_type_options["YOLO object detection"] + model_type_options["Paddle object detection"] +
+                        model_type_options["Paddle object detection (quantized)"],
+                value="yolo_obj_det",
+                label="obj det model")
+            edge_model_type = gr.Dropdown(
+                choices=model_type_options["YOLO semantic segmentation"] + model_type_options["YOLO semantic segmentation (small)"] +
+                        model_type_options["Paddle semantic segmentation"] + model_type_options["Paddle semantic segmentation (quantized)"],
+                value="yolo_edge_det",
+                label="edge seg model")
+            cls_model_type = gr.Dropdown(choices=model_type_options["Paddle orientation classification"],
+                                         value="paddle_cls_det",
+                                         label="direction cls model")
+
+            det_accuracy = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0.7, label="object detection confidence threshold")
+            use_obj_det = gr.Checkbox(value=True, label="use obj det")
+            use_edge_det = gr.Checkbox(value=True, label="use edge seg")
+            use_cls_det = gr.Checkbox(value=True, label="use direction cls")
+
+            run_button = gr.Button("run")
+
+        with gr.Column(scale=2):
+            visual_output = gr.Image(label="output visualize")
+            extract_outputs = gr.Gallery(label="extracted images", object_fit="contain", columns=1, preview=True)
+            time_output = gr.Textbox(label="elapsed")
+
+    run_button.click(
+        fn=run_inference,
+        inputs=[img_input, obj_model_type, edge_model_type, cls_model_type, det_accuracy, use_obj_det, use_edge_det,
+                use_cls_det],
+        outputs=[visual_output, extract_outputs, time_output]
+    )
+
+# Launch the Gradio app
+demo.launch()
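For reference, a minimal sketch of driving the detector outside the Gradio UI, using only the calls that the app.py above makes. The model filenames under models/ and the sample image path mirror what app.py assumes and are illustrative, not part of this commit.

# Standalone sketch, assuming the same models/ and images/ layout app.py expects.
import cv2
from rapid_table_det.inference import TableDetector
from rapid_table_det.utils.visuallize import img_loader, visuallize, extract_table_img

detector = TableDetector(
    obj_model_type="yolo_obj_det",
    edge_model_type="yolo_edge_det",
    cls_model_type="paddle_cls_det",
    obj_model_path="models/yolo_obj_det.onnx",
    edge_model_path="models/yolo_edge_det.onnx",
    cls_model_path="models/paddle_cls_det.onnx",
)

# Run detection on one of the bundled example images.
result, elapse = detector("images/real1.jpg", det_accuracy=0.7)

img = img_loader("images/real1.jpg")
visual_img = img.copy()
for i, res in enumerate(result):
    box = res["box"]
    lt, rt, rb, lb = res["lt"], res["rt"], res["rb"], res["lb"]
    visual_img = visuallize(visual_img, box, lt, rt, rb, lb)    # draw box + orientation marker
    table_img = extract_table_img(img.copy(), lt, rt, rb, lb)   # perspective-crop the table
    cv2.imwrite(f"table_{i}.png", table_img)
cv2.imwrite("visualized.png", visual_img)

As in app.py, the crop is taken from an unannotated copy of the image so the drawn boxes do not leak into the extracted tables.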
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+gradio
+rapid-table-det