nielsr HF staff committed on
Commit
6a94555
·
1 Parent(s): b098c92

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -7
app.py CHANGED
@@ -45,14 +45,20 @@ demo = gr.Interface(
45
  fn=process_document,
46
  inputs= gr.inputs.Image(type="pil"),
47
  outputs="json",
48
- title=f"Interactive demo: Donut 🍩 for DocVQA",
49
- description="""This model is fine-tuned on the DocVQA dataset. <br>
50
- Documentation: https://huggingface.co/docs/transformers/main/en/model_doc/donut
51
- Notebooks: https://github.com/NielsRogge/Transformers-Tutorials/tree/master/Donut
 
 
 
 
 
 
 
 
 
52
 
53
- More details are available at:
54
- - Paper: https://arxiv.org/abs/2111.15664
55
- - Original repository: https://github.com/clovaai/donut""",
56
  examples=[["example_1.png"]],
57
  cache_examples=False,
58
  )
 
45
  fn=process_document,
46
  inputs= gr.inputs.Image(type="pil"),
47
  outputs="json",
48
+ title=f"Demo: Donut 🍩 for DocVQA",
49
+ description = "Gradio Demo for Donut, an instance of `VisionEncoderDecoderModel` fine-tuned on DocVQA (document visual question answering). To use it, simply upload your image and type a question and click 'submit', or click one of the examples to load them. Read more at the links below."
50
+ article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2111.15664' target='_blank'>Donut: OCR-free Document Understanding Transformer</a> | <a href='https://github.com/clovaai/donut' target='_blank'>Github Repo</a></p>"
51
+
52
+ interface = gr.Interface(fn=answer_question,
53
+ inputs=[image, question],
54
+ outputs=answer,
55
+ examples=examples,
56
+ title=title,
57
+ description=description,
58
+ article=article,
59
+ enable_queue=True)
60
+ interface.launch(debug=True)
61
 
 
 
 
62
  examples=[["example_1.png"]],
63
  cache_examples=False,
64
  )