rdezwart committed on
Commit
ada211a
·
1 Parent(s): 7b6a6bf

Begin implementing object detection

Browse files

We're using the 'hustvl/yolos-small-300' for this.

Files changed (1) hide show
  1. app.py +55 -16
app.py CHANGED
@@ -1,27 +1,42 @@
1
  from threading import Thread
2
 
3
  import gradio as gr
4
- from transformers import PreTrainedModel
5
- from transformers import TextIteratorStreamer, AutoModelForCausalLM, AutoTokenizer
 
 
6
 
 
7
  # Moondream does not support the HuggingFace pipeline system, so we have to do it manually
8
  moondream_id = "vikhyatk/moondream2"
9
  moondream_revision = "2024-04-02"
10
  moondream_tokenizer = AutoTokenizer.from_pretrained(moondream_id, revision=moondream_revision)
11
- moondream: PreTrainedModel = AutoModelForCausalLM.from_pretrained(
12
  moondream_id, trust_remote_code=True, revision=moondream_revision
13
  )
14
- moondream.eval()
15
 
 
 
 
 
16
 
17
- def answer_question(_img, _prompt):
18
- image_embeds = moondream.encode_image(_img)
 
 
 
 
 
 
 
 
19
  streamer = TextIteratorStreamer(moondream_tokenizer, skip_special_tokens=True)
20
  thread = Thread(
21
- target=moondream.answer_question,
22
  kwargs={
23
  "image_embeds": image_embeds,
24
- "question": _prompt,
25
  "tokenizer": moondream_tokenizer,
26
  "streamer": streamer,
27
  },
@@ -34,6 +49,21 @@ def answer_question(_img, _prompt):
34
  yield buffer.strip()
35
 
36
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
  if __name__ == "__main__":
38
  with gr.Blocks() as app:
39
  gr.Markdown(
@@ -43,13 +73,22 @@ if __name__ == "__main__":
43
  Final project for IAT 481 at Simon Fraser University, Spring 2024.
44
  """
45
  )
46
- with gr.Row():
47
- prompt = gr.Textbox(label="Input", value="Describe this image.")
48
- submit = gr.Button("Submit")
49
- with gr.Row():
50
- img = gr.Image(label="Image", type="pil")
51
- output = gr.TextArea(label="Output")
52
-
53
- submit.click(answer_question, [img, prompt], output)
 
 
 
 
 
 
 
 
 
54
 
55
  app.queue().launch()
 
1
  from threading import Thread
2
 
3
  import gradio as gr
4
+ import torch
5
+ from transformers import PreTrainedModel # for type hint
6
+ from transformers import TextIteratorStreamer, AutoModelForCausalLM, AutoTokenizer # Moondream
7
+ from transformers import YolosImageProcessor, YolosForObjectDetection # YOLOS-small-300
8
 
9
# --- Moondream --- #
# Moondream does not support the HuggingFace pipeline system, so we have to do it manually
moondream_id = "vikhyatk/moondream2"
# Pin an exact revision so upstream weight/remote-code updates can't silently change behavior.
moondream_revision = "2024-04-02"
moondream_tokenizer = AutoTokenizer.from_pretrained(moondream_id, revision=moondream_revision)
moondream_model: PreTrainedModel = AutoModelForCausalLM.from_pretrained(
    # trust_remote_code is required: Moondream ships its own modeling code on the Hub.
    moondream_id, trust_remote_code=True, revision=moondream_revision
)
moondream_model.eval()  # inference only — disable train-mode layers (dropout, etc.)
18
 
19
# --- YOLOS --- #
yolos_id = "hustvl/yolos-small-300"
yolos_processor: YolosImageProcessor = YolosImageProcessor.from_pretrained(yolos_id)
yolos_model: YolosForObjectDetection = YolosForObjectDetection.from_pretrained(yolos_id)
# Inference only — switch out of train mode, consistent with moondream_model.eval() above.
yolos_model.eval()
23
 
24
+
25
+ def answer_question(img, prompt):
26
+ """
27
+ Submits an image and prompt to the Moondream model.
28
+
29
+ :param img:
30
+ :param prompt:
31
+ :return: yields the output buffer string
32
+ """
33
+ image_embeds = moondream_model.encode_image(img)
34
  streamer = TextIteratorStreamer(moondream_tokenizer, skip_special_tokens=True)
35
  thread = Thread(
36
+ target=moondream_model.answer_question,
37
  kwargs={
38
  "image_embeds": image_embeds,
39
+ "question": prompt,
40
  "tokenizer": moondream_tokenizer,
41
  "streamer": streamer,
42
  },
 
49
  yield buffer.strip()
50
 
51
 
52
def detect_objects(img):
    """
    Runs YOLOS object detection on an image and logs the detections.

    :param img: input image. Assumed to be a PIL image (uses ``img.size``).
        NOTE(review): the wired Gradio input is ``gr.Image()`` whose default
        type is "numpy" — confirm ``type="pil"`` upstream, otherwise
        ``img.size[::-1]`` will fail.
    :return: the input image, unchanged, so the wired ``gr.Image`` output
        shows something (TODO: draw the detected boxes onto it)
    """
    inputs = yolos_processor(images=img, return_tensors="pt")

    # Inference only — disable autograd tracking to save memory and time.
    with torch.no_grad():
        outputs = yolos_model(**inputs)

    # PIL .size is (width, height); post-processing expects (height, width).
    target_sizes = torch.tensor([img.size[::-1]])
    results = yolos_processor.post_process_object_detection(
        outputs, threshold=0.7, target_sizes=target_sizes
    )[0]

    for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
        coords = [round(i, 2) for i in box.tolist()]
        print(
            f"Detected {yolos_model.config.id2label[label.item()]} with confidence "
            f"{round(score.item(), 3)} at location {coords}"
        )

    # Placeholder result for the gr.Image output component (see the click wiring).
    return img
65
+
66
+
67
  if __name__ == "__main__":
68
  with gr.Blocks() as app:
69
  gr.Markdown(
 
73
  Final project for IAT 481 at Simon Fraser University, Spring 2024.
74
  """
75
  )
76
+
77
+ with gr.Tab("Object Detection"):
78
+ with gr.Row():
79
+ yolos_input = gr.Image()
80
+ yolos_output = gr.Image()
81
+ yolos_button = gr.Button("Submit")
82
+
83
+ with gr.Tab("Inference"):
84
+ with gr.Row():
85
+ moon_prompt = gr.Textbox(label="Input", value="Describe this image.")
86
+ moon_submit = gr.Button("Submit")
87
+ with gr.Row():
88
+ moon_img = gr.Image(label="Image", type="pil")
89
+ moon_output = gr.TextArea(label="Output")
90
+
91
+ moon_submit.click(answer_question, [moon_img, moon_prompt], moon_output)
92
+ yolos_button.click(detect_objects, [yolos_input], yolos_output)
93
 
94
  app.queue().launch()