mp-02 committed on
Commit
415bb0b
·
verified ·
1 Parent(s): 03e0cab

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -52
app.py DELETED
@@ -1,52 +0,0 @@
1
- from cord_inference import prediction as cord_prediction
2
- from sroie_inference import prediction as sroie_prediction
3
- import gradio as gr
4
- import json
5
-
6
def prediction(image_path: str):
    """Run the two-stage receipt-extraction pipeline on one image.

    Stage 1 applies the SROIE fine-tune (extracts date, company, address)
    and returns an image with the matched regions blurred; stage 2 feeds
    that blurred image to the CORD fine-tune for the remaining fields.

    Parameters
    ----------
    image_path : str
        Filesystem path of the receipt image. Assumed to end in a file
        extension (e.g. ".png") — TODO confirm callers guarantee this.

    Returns
    -------
    tuple
        (sroie_result, blurred_image, cord_result, annotated_image, merged)
        where merged combines both result dicts.
    """
    # Stage 1: SROIE fine-tune. NOTE(review): the original comment claimed
    # the CORD model ran first, but the call order here (SROIE then CORD)
    # matches the app's own description text.
    d, image = sroie_prediction(image_path)

    # Persist the blurred image so it can be handed to the second model.
    # rsplit keeps any earlier dots in the path intact (the original
    # split('.')[0] truncated names like "a.b.png" to "a_blurred.b").
    root, ext = image_path.rsplit('.', 1)
    image_path_blurred = root + '_blurred.' + ext
    image.save(image_path_blurred)

    # Stage 2: CORD fine-tune on the blurred image.
    d1, image1 = cord_prediction(image_path_blurred)

    # Merge the two results. The original stitched json.dumps output
    # together with string surgery, which breaks on nested objects and on
    # an empty second result, and made the type of `k` inconsistent
    # (dict vs str). A dict merge is equivalent for flat results, robust
    # otherwise, and gr.JSON renders dicts and JSON strings alike.
    k = {**d, **d1}

    return d, image, d1, image1, k
25
-
26
- # p,i,j = prediction("11990982-img.png")
27
- # print(p)
28
-
29
-
30
# --- Gradio UI ---------------------------------------------------------------
# One Interface: the user uploads a receipt image; the app shows every
# intermediate step — the SROIE pass (JSON + blurred image), the CORD pass
# (JSON + annotated image) — and finally the merged JSON.

title = "Interactive demo: LayoutLMv3 for receipts"
description = "Demo for Microsoft's LayoutLMv3, a Transformer for state-of-the-art document image understanding tasks. This particular model is fine-tuned on CORD and SROIE, which are datasets of receipts.\n It firsts uses the fine-tune on SROIE to extract date, company and address, then the fine-tune on CORD for the other info.\n To use it, simply upload an image or use the example image below. Results will show up in a few seconds."
examples = [['image.png']]

css = """.output_image, .input_image {height: 600px !important}"""

# Input is a file path (prediction() re-opens the image itself); the five
# outputs mirror the five values prediction() returns, in order.
demo_input = gr.Image(type="filepath")
demo_outputs = [
    gr.JSON(label="json parsing"),
    gr.Image(type="pil", label="blurred image"),
    gr.JSON(label="json parsing"),
    gr.Image(type="pil", label="annotated image"),
    gr.JSON(label="json parsing"),
]

iface = gr.Interface(
    fn=prediction,
    inputs=demo_input,
    outputs=demo_outputs,
    title=title,
    description=description,
    examples=examples,
    css=css,
)

iface.launch()