import gradio as gr
from PIL import Image
import torch
from transformers import AutoImageProcessor, AutoModelForObjectDetection

# Load the processor and model for table structure recognition
processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-structure-recognition")
model = AutoModelForObjectDetection.from_pretrained("microsoft/table-transformer-structure-recognition")

# Define the inference function
def predict(image: Image.Image):
    # Preprocess the input image
    inputs = processor(images=image, return_tensors="pt")
    
    # Perform object detection using the model
    with torch.no_grad():
        outputs = model(**inputs)

    # Convert the raw outputs into pixel-space (x1, y1, x2, y2) boxes, confidence
    # scores, and class labels; outputs.pred_boxes alone are normalized (cx, cy, w, h)
    target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
    results = processor.post_process_object_detection(
        outputs, threshold=0.6, target_sizes=target_sizes  # threshold is a tunable confidence cutoff
    )[0]

    # Map class IDs to human-readable names and return JSON-serializable lists
    labels = [model.config.id2label[int(label_id)] for label_id in results["labels"]]
    print("Predicted classes:", labels)
    print("Bounding boxes (x1, y1, x2, y2):", results["boxes"].tolist())
    return {
        "boxes": results["boxes"].tolist(),
        "scores": results["scores"].tolist(),
        "labels": labels,
    }

# Set up the Gradio interface
interface = gr.Interface(
    fn=predict,  # The function that gets called when an image is uploaded
    inputs=gr.Image(type="pil"),  # Image input (as PIL image)
    outputs="json",  # Outputting a JSON with the boxes and classes
)

# Launch the Gradio app
interface.launch()
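
# Note: the original file also called
# gr.load("models/microsoft/table-transformer-structure-recognition").launch()
# before defining the custom interface. That one-liner builds a demo directly
# from the Hub model, but launch() blocks, so the custom interface above would
# never be reached. As a minimal sketch, if the auto-generated demo is all that
# is needed, the whole app can instead be reduced to:
#
#     import gradio as gr
#     gr.load("models/microsoft/table-transformer-structure-recognition").launch()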