Keemoz0 committed on
Commit eac3912 · 1 Parent(s): cc7610a

attempt 4 of logging

Files changed (1)
  app.py  +10 -20
app.py CHANGED
@@ -1,7 +1,10 @@
 import gradio as gr
-from transformers import AutoImageProcessor, AutoModelForObjectDetection
+from huggingface_hub import hf_hub_download
+from PIL import Image
 import torch
+from transformers import AutoImageProcessor, AutoModelForObjectDetection
 
+gr.load("models/microsoft/table-transformer-structure-recognition").launch()
 # Load the processor and model for table structure recognition
 processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-structure-recognition")
 model = AutoModelForObjectDetection.from_pretrained("microsoft/table-transformer-structure-recognition")
@@ -17,30 +20,17 @@ def predict(image):
 
     # Extract bounding boxes and class labels
     predicted_boxes = outputs.pred_boxes[0].cpu().numpy()  # First image
-    predicted_class_logits = outputs.logits[0].cpu().numpy()  # Class logits for the first image
-    predicted_classes = predicted_class_logits.argmax(-1)  # Get class predictions
-    class_names = model.config.id2label  # Get the class name mapping
-
-    # Collect the class IDs and labels along with the bounding boxes
-    result = []
-    for idx, class_id in enumerate(predicted_classes):
-        class_name = class_names[class_id]
-        result.append({
-            "class_id": int(class_id),
-            "class_name": class_name,
-            "bounding_box": predicted_boxes[idx].tolist()  # Convert to list for JSON serialization
-        })
-
-    # Return the bounding boxes and classes
-    return result
+    predicted_classes = outputs.logits.argmax(-1).cpu().numpy()  # Class predictions
+    # Return the bounding boxes for display
+    print("Predicted Classes (IDs):", predicted_classes)
+    print("Bounding Boxes (x1, y1, x2, y2):", predicted_boxes)
+    return {"boxes": predicted_boxes.tolist(), "classes": predicted_classes.tolist()}
 
 # Set up the Gradio interface
 interface = gr.Interface(
     fn=predict,  # The function that gets called when an image is uploaded
     inputs=gr.Image(type="pil"),  # Image input (as PIL image)
-    outputs="json",  # Outputting a JSON with the class labels, IDs, and bounding boxes
-    title="Table Structure Recognition",  # Add title for clarity
-    description="Upload an image and see the detected table columns and their corresponding class IDs.",
+    outputs="json",  # Outputting a JSON with the boxes and classes
 )
 
 # Launch the Gradio app
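For context on what the logged tensors contain: `outputs.pred_boxes` from this DETR-family model are normalized (center_x, center_y, width, height) values rather than pixel (x1, y1, x2, y2) corners, and `outputs.logits` include an extra "no object" class. The sketch below is not part of this commit; it shows one common way to decode the same outputs into pixel-space boxes and readable label names, assuming the checkpoint's image processor is the DETR-style processor that provides post_process_object_detection. The preprocessing and forward-pass lines are also assumptions, since the diff elides the top of predict().

import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForObjectDetection

processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-structure-recognition")
model = AutoModelForObjectDetection.from_pretrained("microsoft/table-transformer-structure-recognition")

def predict(image: Image.Image) -> dict:
    # Assumed preprocessing and forward pass (not shown in the diff)
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # Raw tensors, as printed by the committed code:
    # normalized (cx, cy, w, h) boxes and per-query argmax class IDs
    raw_boxes = outputs.pred_boxes[0].cpu().numpy()
    raw_classes = outputs.logits[0].argmax(-1).cpu().numpy()

    # Standard DETR-style decoding into pixel-space corners and label names
    target_sizes = torch.tensor([image.size[::-1]])  # PIL size is (w, h); the post-processor wants (h, w)
    detections = processor.post_process_object_detection(
        outputs, threshold=0.7, target_sizes=target_sizes
    )[0]
    labels = [model.config.id2label[i] for i in detections["labels"].tolist()]

    return {
        "boxes": detections["boxes"].tolist(),    # (x1, y1, x2, y2) in pixels
        "labels": labels,                         # e.g. "table row", "table column"
        "scores": detections["scores"].tolist(),
        "raw_boxes": raw_boxes.tolist(),
        "raw_classes": raw_classes.tolist(),
    }

Either return shape works with the outputs="json" component above; the decoded form is simply easier to read in the Space's output panel.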