BenK0y committed
Commit 5da7355 · verified · 1 Parent(s): e310b57

Update app.py

Files changed (1)
  1. app.py +24 -26
app.py CHANGED
@@ -5,7 +5,7 @@ from pathlib import Path
 import gradio as gr
 from transformers import DetrImageProcessor, DetrForObjectDetection
 import torch
-from PIL import Image
+from PIL import Image, ImageDraw
 import requests
 
 # Load environment variables from .env file
@@ -78,6 +78,25 @@ def upload_file(files):
     response = generate_gemini_response(input_prompt, file_paths[0])
     return file_paths[0], response
 
+# Object detection part
+def detect_objects(image):
+    processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50", revision="no_timm")
+    model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50", revision="no_timm")
+
+    inputs = processor(images=image, return_tensors="pt")
+    outputs = model(**inputs)
+
+    target_sizes = torch.tensor([image.size[::-1]])
+    results = processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.9)[0]
+
+    draw = ImageDraw.Draw(image)
+    for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
+        box = [round(i, 2) for i in box.tolist()]
+        draw.rectangle(box, outline="red", width=3)
+        draw.text((box[0], box[1]), f"{model.config.id2label[label.item()]}: {round(score.item(), 2)}", fill="red")
+
+    return image
+
 with gr.Blocks() as demo:
     header = gr.Label("RADARPICK: Vous avez été radarisé!")
     image_output = gr.Image()
@@ -89,33 +108,12 @@ with gr.Blocks() as demo:
     def process_generate(files):
         if not files:
             return None, "Image not uploaded"
-        return upload_file(files)
+        file_path = files[0].name
+        image = Image.open(file_path)
+        detected_image = detect_objects(image)
+        return detected_image, upload_file(files)[1]
 
     upload_button.upload(fn=lambda files: files[0].name if files else None, inputs=[upload_button], outputs=image_output)
     generate_button.click(fn=process_generate, inputs=[upload_button], outputs=[image_output, file_output])
 
 demo.launch()
-
-
-
-url = "http://images.cocodataset.org/val2017/000000039769.jpg"
-image = Image.open(requests.get(url, stream=True).raw)
-
-# you can specify the revision tag if you don't want the timm dependency
-processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50", revision="no_timm")
-model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50", revision="no_timm")
-
-inputs = processor(images=image, return_tensors="pt")
-outputs = model(**inputs)
-
-# convert outputs (bounding boxes and class logits) to COCO API
-# let's only keep detections with score > 0.9
-target_sizes = torch.tensor([image.size[::-1]])
-results = processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.9)[0]
-
-for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
-    box = [round(i, 2) for i in box.tolist()]
-    print(
-        f"Detected {model.config.id2label[label.item()]} with confidence "
-        f"{round(score.item(), 3)} at location {box}"
-    )
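
For a quick sanity check of the new helper outside the Gradio UI, the logic added in this commit can be run as a standalone script against the same COCO sample image the removed snippet used. A minimal sketch, not part of this commit: the detect_objects body is copied verbatim from the diff above so the script is self-contained, and it assumes the Space's dependencies (transformers, torch, Pillow, requests) are installed; the output file name annotated.jpg is an arbitrary choice for illustration.

# Standalone sketch (not part of this commit): run the detect_objects logic
# from the diff above on the COCO sample image used by the removed snippet.
import requests
import torch
from PIL import Image, ImageDraw
from transformers import DetrImageProcessor, DetrForObjectDetection


def detect_objects(image):
    # Same steps as the helper added in this commit: DETR inference, keep
    # detections with score > 0.9, draw red boxes and class labels.
    processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50", revision="no_timm")
    model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50", revision="no_timm")

    inputs = processor(images=image, return_tensors="pt")
    outputs = model(**inputs)

    target_sizes = torch.tensor([image.size[::-1]])
    results = processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.9)[0]

    draw = ImageDraw.Draw(image)
    for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
        box = [round(i, 2) for i in box.tolist()]
        draw.rectangle(box, outline="red", width=3)
        draw.text((box[0], box[1]), f"{model.config.id2label[label.item()]}: {round(score.item(), 2)}", fill="red")

    return image


url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
detect_objects(image).save("annotated.jpg")

Note that, as committed, detect_objects reloads the DETR processor and model on every call; the sketch keeps that behaviour so it stays faithful to the diff.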