Reverted
app.py (CHANGED)
Previous version of app.py (lines removed by this commit are prefixed with "-"):

@@ -1,9 +1,10 @@
 import gradio as gr
 from huggingface_hub import hf_hub_download
-from PIL import Image
 import torch
 from transformers import AutoImageProcessor, AutoModelForObjectDetection

 # Load the processor and model for table structure recognition
 processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-structure-recognition")
 model = AutoModelForObjectDetection.from_pretrained("microsoft/table-transformer-structure-recognition")

@@ -20,34 +21,16 @@ def predict(image):
     # Extract bounding boxes and class labels
     predicted_boxes = outputs.pred_boxes[0].cpu().numpy()  # First image
     predicted_classes = outputs.logits.argmax(-1).cpu().numpy()  # Class predictions
-
-    #
-
-    width, height = image.size
-
-    # Loop over all detected boxes and draw them on the image
-    for box in predicted_boxes:
-        # Box coordinates are normalized, so multiply by image dimensions
-        x0, y0, x1, y1 = box
-
-        # Ensure that y0 < y1 and x0 < x1
-        if x1 < x0:
-            x0, x1 = x1, x0
-        if y1 < y0:
-            y0, y1 = y1, y0
-
-        # Draw the rectangle
-        draw.rectangle([x0 * width, y0 * height, x1 * width, y1 * height], outline="red", width=3)
-
-        # Return the image with bounding boxes drawn
-        return image

 # Set up the Gradio interface
 interface = gr.Interface(
     fn=predict,  # The function that gets called when an image is uploaded
     inputs=gr.Image(type="pil"),  # Image input (as PIL image)
-    outputs=
 )

 # Launch the Gradio app
-interface.launch()
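The drawing code removed above was never runnable as written: `draw` is used without ever being created via `ImageDraw.Draw(image)`, `return image` sits inside the `for` loop so at most the first box would be drawn, and DETR-style models such as Table Transformer emit `pred_boxes` as normalized (center_x, center_y, width, height) values rather than the (x0, y0, x1, y1) corners the loop assumes. Not part of this commit, but a minimal sketch of how that drawing path could be made to work, using the standard Transformers post-processing helper (the `draw_boxes` name and the 0.7 threshold are choices made here, not taken from the Space):

```python
import torch
from PIL import Image, ImageDraw
from transformers import AutoImageProcessor, AutoModelForObjectDetection

processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-structure-recognition")
model = AutoModelForObjectDetection.from_pretrained("microsoft/table-transformer-structure-recognition")

def draw_boxes(image: Image.Image, threshold: float = 0.7) -> Image.Image:
    # Preprocess and run the structure-recognition model
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # pred_boxes are normalized (cx, cy, w, h); post-processing converts them to
    # absolute (x0, y0, x1, y1) pixel coordinates and drops low-confidence queries
    target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
    results = processor.post_process_object_detection(
        outputs, threshold=threshold, target_sizes=target_sizes
    )[0]

    draw = ImageDraw.Draw(image)  # the removed code used `draw` without creating it
    for box in results["boxes"]:
        x0, y0, x1, y1 = box.tolist()
        draw.rectangle([x0, y0, x1, y1], outline="red", width=3)

    return image  # returned after the loop, not inside it
```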
Updated version of app.py (lines added by this commit are prefixed with "+"):

 import gradio as gr
 from huggingface_hub import hf_hub_download
+from PIL import Image
 import torch
 from transformers import AutoImageProcessor, AutoModelForObjectDetection

+gr.load("models/microsoft/table-transformer-structure-recognition").launch()
 # Load the processor and model for table structure recognition
 processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-structure-recognition")
 model = AutoModelForObjectDetection.from_pretrained("microsoft/table-transformer-structure-recognition")

 ... (lines 11-20, including the definition of predict(image), are not shown in the diff) ...

     # Extract bounding boxes and class labels
     predicted_boxes = outputs.pred_boxes[0].cpu().numpy()  # First image
     predicted_classes = outputs.logits.argmax(-1).cpu().numpy()  # Class predictions
+
+    # Return the bounding boxes for display
+    return {"boxes": predicted_boxes.tolist(), "classes": predicted_classes.tolist()}

 # Set up the Gradio interface
 interface = gr.Interface(
     fn=predict,  # The function that gets called when an image is uploaded
     inputs=gr.Image(type="pil"),  # Image input (as PIL image)
+    outputs="json",  # Outputting a JSON with the boxes and classes
 )

 # Launch the Gradio app
+interface.launch()
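Two caveats about the updated file: `gr.load("models/microsoft/table-transformer-structure-recognition").launch()` runs at import time, and `launch()` normally blocks the script, so the `gr.Interface` defined below it would most likely never be reached; and the middle of `predict()` (the preprocessing and forward pass) is not visible in this diff. Below is a self-contained sketch of what a working JSON-returning version might look like; the `gr.load` line is dropped, and the preprocessing and forward-pass lines are assumptions rather than code from the commit:

```python
import gradio as gr
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForObjectDetection

# Load the processor and model for table structure recognition
processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-structure-recognition")
model = AutoModelForObjectDetection.from_pretrained("microsoft/table-transformer-structure-recognition")

def predict(image: Image.Image):
    # Preprocess the PIL image and run the detector (assumed; not shown in the diff)
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # Extract bounding boxes and class labels for the single input image
    predicted_boxes = outputs.pred_boxes[0].cpu().numpy()           # normalized (cx, cy, w, h)
    predicted_classes = outputs.logits.argmax(-1)[0].cpu().numpy()  # per-query class ids

    # Return the bounding boxes and classes for display
    return {"boxes": predicted_boxes.tolist(), "classes": predicted_classes.tolist()}

# Set up the Gradio interface
interface = gr.Interface(
    fn=predict,                   # called when an image is uploaded
    inputs=gr.Image(type="pil"),  # image input as a PIL image
    outputs="json",               # JSON output with the boxes and classes
)

# Launch the Gradio app
interface.launch()
```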