tanishq1508 committed on
Commit
9b58dee
·
1 Parent(s): 69bfa5d

Added phi_1.5

Browse files
Files changed (1) hide show
  1. run.py +18 -2
run.py CHANGED
@@ -4,16 +4,32 @@ import numpy as np
4
  import torch
5
  model = torch.hub.load('ultralytics/yolov5', 'custom', path='last2.pt', force_reload=True)
6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  def detect(im):
8
  results = model(im)
9
- return results
10
  #return [np.squeeze(results.render())]
11
  #return [im]
12
 
13
  demo = gr.Interface(
14
  detect,
15
  [gr.Image(source="webcam", tool=None)],
16
- ["text"],
17
  )
18
  if __name__ == "__main__":
19
  demo.launch(share=True)
 
4
  import torch
5
  model = torch.hub.load('ultralytics/yolov5', 'custom', path='last2.pt', force_reload=True)
6
 
7
+ from transformers import AutoModelForCausalLM, AutoTokenizer
8
+ llm = AutoModelForCausalLM.from_pretrained("microsoft/phi-1_5", trust_remote_code=True)
9
+ tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-1_5", trust_remote_code=True)
10
+
11
def give(results):
    """Turn the latest YOLOv5 detection into an LLM-generated feedback string.

    Parameters
    ----------
    results : yolov5 ``Detections`` object, as returned by ``model(im)``.
        ``results.pred[0]`` is assumed to be an (n, 6) tensor of
        ``[x1, y1, x2, y2, conf, cls]`` rows — TODO confirm against the
        custom model's output.

    Returns
    -------
    str
        phi-1.5's continuation (max_length=30 tokens) of a canned prompt
        chosen by the detected class: 0 -> focused, 1 -> dizzy,
        anything else -> distracted.
    """
    preds = results.pred[0]
    # Robustness fix: the original indexed preds[-1] unconditionally, which
    # raises IndexError on a frame with zero detections.
    if len(preds) == 0:
        prompt = "No face detected."
    else:
        # Hoist the class id — the original evaluated
        # int(results.pred[0][-1][-1].numpy()) once per branch.
        cls_id = int(preds[-1][-1])
        prompt = {
            0: "Great focus. You are doing great!",
            1: "You look dizzy.",
        }.get(cls_id, "You are getting distracted.")
    inputs = tokenizer(prompt, return_tensors="pt", return_attention_mask=False)
    outputs = llm.generate(**inputs, max_length=30)
    return tokenizer.batch_decode(outputs)[0]
22
+
23
def detect(im):
    """Run YOLOv5 on one webcam frame and pair the raw results with feedback.

    Returns a two-element list: the Detections object (rendered by Gradio as
    text) and the phi-1.5 feedback string produced by ``give``.
    """
    detections = model(im)
    return [detections, give(detections)]
28
 
29
  demo = gr.Interface(
30
  detect,
31
  [gr.Image(source="webcam", tool=None)],
32
+ ["text","text"],
33
  )
34
  if __name__ == "__main__":
35
  demo.launch(share=True)