Commit f5ffb8d: "update model and update desc"
Parent(s): f4476a2

app.py CHANGED
@@ -9,30 +9,29 @@ def demo_process(input_img):
     # input_img = Image.fromarray(input_img)
     output = pretrained_model.inference(image=input_img, prompt=task_prompt)["predictions"][0]
     return output
-[9 lines removed here; their content is truncated in the rendered diff, leaving only the fragments "image.", "image", "image."]
+task_name = "preparedFinetuneData"
+# task_name = "cord-v2"
+task_prompt = f"<s_{task_name}>"
+
+image = Image.open("preparedFinetuneData/test/100.jpg")
+image.save("sample_receipt1.png")
+image = Image.open("preparedFinetuneData/test/101.jpg")
+image.save("sample_receipt2.png")
+
+PATH = 'epochs30_base_on_donut_base/'
+# pretrained_model = DonutModel.from_pretrained(PATH, local_files_only=True)
+# pretrained_model = DonutModel.from_pretrained("doshan1250/p9OcrAiV1", revision="main")
+pretrained_model = DonutModel.from_pretrained("doshan1250/p9OcrAiV1")
+# pretrained_model = DonutModel.from_pretrained("naver-clova-ix/donut-base-finetuned-cord-v2")
 pretrained_model.eval()
 
 demo = gr.Interface(
     fn=demo_process,
     inputs= gr.Image(type="pil"),
     outputs="json",
-    title=f"Donut 🍩 demonstration for `
-    description="""
-
-More CORD receipt images are available at https://huggingface.co/datasets/naver-clova-ix/cord-v2
-
-More details are available at:
-- Paper: https://arxiv.org/abs/2111.15664
-- GitHub: https://github.com/clovaai/donut""",
-    examples=[["cord_sample_receipt1.png"], ["cord_sample_receipt2.png"]],
+    title=f"Donut 🍩 demonstration for `{task_name}` task",
+    description="""Goodarc p9, trained on 100 English receipts. <br>""",
+    examples=[["sample_receipt1.png"], ["sample_receipt2.png"]],
     cache_examples=False,
 )
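For context, a minimal sketch of how app.py plausibly reads after this commit. The hunk above covers only lines 9-37, so the imports and the final demo.launch() call are assumptions based on the usual Donut Gradio demo layout; the rest mirrors the added lines (commented-out checkpoint alternatives omitted).

# Sketch only; imports and demo.launch() are assumed, not shown in the diff.
import gradio as gr
from PIL import Image
from donut import DonutModel


def demo_process(input_img):
    # Donut returns a dict whose "predictions" list holds the parsed JSON for each image.
    output = pretrained_model.inference(image=input_img, prompt=task_prompt)["predictions"][0]
    return output


# The prompt token must match the task the checkpoint was fine-tuned on.
task_name = "preparedFinetuneData"
task_prompt = f"<s_{task_name}>"

# Copy two test images into the repo root so gr.Interface can serve them as examples.
image = Image.open("preparedFinetuneData/test/100.jpg")
image.save("sample_receipt1.png")
image = Image.open("preparedFinetuneData/test/101.jpg")
image.save("sample_receipt2.png")

# Load the fine-tuned checkpoint from the Hub and put it in inference mode.
pretrained_model = DonutModel.from_pretrained("doshan1250/p9OcrAiV1")
pretrained_model.eval()

demo = gr.Interface(
    fn=demo_process,
    inputs=gr.Image(type="pil"),
    outputs="json",
    title=f"Donut 🍩 demonstration for `{task_name}` task",
    description="Goodarc p9, trained on 100 English receipts. <br>",
    examples=[["sample_receipt1.png"], ["sample_receipt2.png"]],
    cache_examples=False,
)
demo.launch()  # assumed; not part of the changed hunk

In effect, the commit swaps the served checkpoint to doshan1250/p9OcrAiV1, draws the example images from the preparedFinetuneData test split, and replaces the CORD-specific title and description.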