bug fix
app.py CHANGED
@@ -11,13 +11,16 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 model.to(device)

 def process_document(image):
+
+    print(image)
+    print(f"Type of Image {image}")
     # prepare encoder inputs
     pixel_values = processor(image, return_tensors="pt").pixel_values

     # prepare decoder inputs
     task_prompt = "<s_cord-v2>"
     decoder_input_ids = processor.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt").input_ids
-
+
     # generate answer
     outputs = model.generate(
         pixel_values.to(device),
@@ -45,7 +48,7 @@ demo = gr.Interface(
     fn=process_document,
     inputs="image",
     outputs="json",
-    title="
+    title="Template-Free OCR model",
     article=article,
     enable_queue=True,
     examples=[["example.png"], ["example_2.png"], ["example_3.png"]],
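
For context, these hunks come from a Donut-style document-understanding demo: the "<s_cord-v2>" task prompt and the processor / model.generate pattern match the CORD-v2 receipt-parsing example. Below is a minimal sketch of how a process_document function like this is typically completed; the checkpoint name, the generation arguments, and the post-processing steps are assumptions taken from the standard Donut example, not from this diff.

# Sketch only: checkpoint, generation arguments, and post-processing are
# assumptions based on the standard Donut CORD-v2 example, not this Space's code.
import re

import torch
from transformers import DonutProcessor, VisionEncoderDecoderModel

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-cord-v2")
model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-cord-v2")

device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

def process_document(image):
    # prepare encoder inputs (Gradio passes the uploaded image to this function)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # prepare decoder inputs: the task prompt tells the decoder which schema to emit
    task_prompt = "<s_cord-v2>"
    decoder_input_ids = processor.tokenizer(
        task_prompt, add_special_tokens=False, return_tensors="pt"
    ).input_ids

    # generate answer
    outputs = model.generate(
        pixel_values.to(device),
        decoder_input_ids=decoder_input_ids.to(device),
        max_length=model.decoder.config.max_position_embeddings,
        early_stopping=True,
        pad_token_id=processor.tokenizer.pad_token_id,
        eos_token_id=processor.tokenizer.eos_token_id,
        use_cache=True,
        bad_words_ids=[[processor.tokenizer.unk_token_id]],
        return_dict_in_generate=True,
    )

    # postprocess: strip special tokens and the task prompt, then convert to JSON
    sequence = processor.batch_decode(outputs.sequences)[0]
    sequence = sequence.replace(processor.tokenizer.eos_token, "").replace(
        processor.tokenizer.pad_token, ""
    )
    sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()
    return processor.token2json(sequence)

The gr.Interface block in the second hunk wires this function to an image input and a JSON output; the added title argument simply labels the demo page.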