Update app.py
app.py CHANGED
@@ -3,6 +3,10 @@ from dotenv import load_dotenv
 import google.generativeai as genai
 from pathlib import Path
 import gradio as gr
+from transformers import DetrImageProcessor, DetrForObjectDetection
+import torch
+from PIL import Image
+import requests
 
 # Load environment variables from .env file
 load_dotenv()
@@ -90,4 +94,28 @@ with gr.Blocks() as demo:
     upload_button.upload(fn=lambda files: files[0].name if files else None, inputs=[upload_button], outputs=image_output)
     generate_button.click(fn=process_generate, inputs=[upload_button], outputs=[image_output, file_output])
 
-demo.launch()
+demo.launch()
+
+
+
+url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+image = Image.open(requests.get(url, stream=True).raw)
+
+# you can specify the revision tag if you don't want the timm dependency
+processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50", revision="no_timm")
+model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50", revision="no_timm")
+
+inputs = processor(images=image, return_tensors="pt")
+outputs = model(**inputs)
+
+# convert outputs (bounding boxes and class logits) to COCO API
+# let's only keep detections with score > 0.9
+target_sizes = torch.tensor([image.size[::-1]])
+results = processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.9)[0]
+
+for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
+    box = [round(i, 2) for i in box.tolist()]
+    print(
+        f"Detected {model.config.id2label[label.item()]} with confidence "
+        f"{round(score.item(), 3)} at location {box}"
+    )
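Note that demo.launch() normally blocks the script until the Gradio server stops, so the detection code appended after it in this commit does not run while the Space is serving requests. If the goal is to use DETR from inside the app, one option is to load the model once at startup and wrap the same calls in a function an event handler can invoke. The sketch below is illustrative only and not part of the commit; detect_objects is a hypothetical helper name, and it assumes it receives a single image file path such as the one the existing upload_button lambda extracts.

# Illustrative sketch, not part of the commit: the same DETR calls from the
# diff, wrapped so a Gradio handler could run them per uploaded image.
import torch
from PIL import Image
from transformers import DetrImageProcessor, DetrForObjectDetection

# Load the processor and model once instead of on every request.
processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50", revision="no_timm")
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50", revision="no_timm")

def detect_objects(image_path, threshold=0.9):
    # Hypothetical helper: returns (label, score, [x0, y0, x1, y1]) tuples.
    image = Image.open(image_path).convert("RGB")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    target_sizes = torch.tensor([image.size[::-1]])
    results = processor.post_process_object_detection(
        outputs, target_sizes=target_sizes, threshold=threshold
    )[0]
    return [
        (model.config.id2label[label.item()], round(score.item(), 3),
         [round(v, 2) for v in box.tolist()])
        for score, label, box in zip(results["scores"], results["labels"], results["boxes"])
    ]

Inside the existing gr.Blocks() context this could be wired to a button the same way generate_button is, with the input adapted as the current lambda does (files[0].name) before the path reaches detect_objects; the button and its outputs are left unspecified here because they are not part of this commit.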
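The snippet in the diff only prints detections to the Space's logs. If the boxes should instead appear in the existing image_output component, one approach is to draw them on the image with PIL and return the annotated copy, which a Gradio image component can display directly. Again, this is only a sketch under the same assumptions; draw_detections is a hypothetical name and the detection tuples follow the detect_objects format sketched above.

# Illustrative sketch, not part of the commit: draw (label, score, box) tuples
# onto a copy of the image so an image component can display them.
from PIL import Image, ImageDraw

def draw_detections(image, detections):
    annotated = image.copy()
    draw = ImageDraw.Draw(annotated)
    for label, score, (x0, y0, x1, y1) in detections:
        # One red box plus a small caption per detection.
        draw.rectangle([x0, y0, x1, y1], outline="red", width=3)
        draw.text((x0, max(0, y0 - 12)), f"{label} {score:.2f}", fill="red")
    return annotated

For example, annotated = draw_detections(Image.open(path).convert("RGB"), detect_objects(path)) would produce a PIL image that a click handler could return to image_output.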