Paul Bird committed on
Commit
7d17e14
·
verified ·
1 Parent(s): ac064d3

removed underscores

Browse files
Files changed (1) hide show
  1. RunBlazeFace.cs +75 -75
RunBlazeFace.cs CHANGED
@@ -13,17 +13,17 @@ using Lays = Unity.Sentis.Layers;
13
  * Put this script on the Main Camera
14
  * Put blazeface.sentis in the Assets/StreamingAssets folder
15
  * Create a RawImage of size 320x320 in the scene
16
- * Put a link to that image in _previewUI
17
  * Put a video in Assets/StreamingAssets folder and put the name of it in videoName
18
- * Or put a test image in _inputImage
19
- * Set _inputType to appropriate input
20
  */
21
 
22
 
23
  public class RunBlazeFace : MonoBehaviour
24
  {
25
  //Drag a link to a raw image here:
26
- public RawImage _previewUI = null;
27
 
28
  // Put your bounding box sprite image here
29
  public Sprite faceTexture;
@@ -34,37 +34,37 @@ public class RunBlazeFace : MonoBehaviour
34
  public string videoName = "chatting.mp4";
35
 
36
  //
37
- public Texture2D _inputImage;
38
 
39
- public InputType _inputType = InputType.Video;
40
 
41
- Vector2Int _resolution = new Vector2Int(640, 640);
42
- WebCamTexture _webcam;
43
- VideoPlayer _video;
44
 
45
  const BackendType backend = BackendType.GPUCompute;
46
 
47
- RenderTexture _targetTexture;
48
  public enum InputType { Image, Video, Webcam };
49
 
50
 
51
  //Some adjustable parameters for the model
52
- [SerializeField, Range(0, 1)] float _iouThreshold = 0.5f;
53
- [SerializeField, Range(0, 1)] float _scoreThreshold = 0.5f;
54
- int _maxOutputBoxes = 64;
55
 
56
- IWorker _worker;
57
 
58
  //Holds image size
59
- int _size;
60
 
61
  Ops ops;
62
  ITensorAllocator allocator;
63
 
64
- Model _model;
65
 
66
  //webcam device name:
67
- const string _deviceName = "";
68
 
69
  bool closing = false;
70
 
@@ -82,7 +82,7 @@ public class RunBlazeFace : MonoBehaviour
82
 
83
  //(Note: if using a webcam on mobile get permissions here first)
84
 
85
- _targetTexture = new RenderTexture(_resolution.x, _resolution.y, 0);
86
 
87
  SetupInput();
88
 
@@ -93,28 +93,28 @@ public class RunBlazeFace : MonoBehaviour
93
 
94
  void SetupInput()
95
  {
96
- switch (_inputType)
97
  {
98
  case InputType.Webcam:
99
  {
100
- _webcam = new WebCamTexture(_deviceName, _resolution.x, _resolution.y);
101
- _webcam.requestedFPS = 30;
102
- _webcam.Play();
103
  break;
104
  }
105
  case InputType.Video:
106
  {
107
- _video = gameObject.AddComponent<VideoPlayer>();//new VideoPlayer();
108
- _video.renderMode = VideoRenderMode.APIOnly;
109
- _video.source = VideoSource.Url;
110
- _video.url = Application.streamingAssetsPath + "/"+videoName;
111
- _video.isLooping = true;
112
- _video.Play();
113
  break;
114
  }
115
  default:
116
  {
117
- Graphics.Blit(_inputImage, _targetTexture);
118
  }
119
  break;
120
  }
@@ -122,35 +122,35 @@ public class RunBlazeFace : MonoBehaviour
122
 
123
  void Update()
124
  {
125
- if (_inputType == InputType.Webcam)
126
  {
127
  // Format video input
128
- if (!_webcam.didUpdateThisFrame) return;
129
 
130
- var aspect1 = (float)_webcam.width / _webcam.height;
131
- var aspect2 = (float)_resolution.x / _resolution.y;
132
  var gap = aspect2 / aspect1;
133
 
134
- var vflip = _webcam.videoVerticallyMirrored;
135
  var scale = new Vector2(gap, vflip ? -1 : 1);
136
  var offset = new Vector2((1 - gap) / 2, vflip ? 1 : 0);
137
 
138
- Graphics.Blit(_webcam, _targetTexture, scale, offset);
139
  }
140
- if (_inputType == InputType.Video)
141
  {
142
- var aspect1 = (float)_video.width / _video.height;
143
- var aspect2 = (float)_resolution.x / _resolution.y;
144
  var gap = aspect2 / aspect1;
145
 
146
  var vflip = false;
147
  var scale = new Vector2(gap, vflip ? -1 : 1);
148
  var offset = new Vector2((1 - gap) / 2, vflip ? 1 : 0);
149
- Graphics.Blit(_video.texture, _targetTexture, scale, offset);
150
  }
151
- if (_inputType == InputType.Image)
152
  {
153
- Graphics.Blit(_inputImage, _targetTexture);
154
  }
155
 
156
  if (Input.GetKeyDown(KeyCode.Escape))
@@ -161,7 +161,7 @@ public class RunBlazeFace : MonoBehaviour
161
 
162
  if (Input.GetKeyDown(KeyCode.P))
163
  {
164
- _previewUI.enabled = !_previewUI.enabled;
165
  }
166
  }
167
 
@@ -170,7 +170,7 @@ public class RunBlazeFace : MonoBehaviour
170
  {
171
  if (!closing)
172
  {
173
- RunInference(_targetTexture);
174
  }
175
  }
176
 
@@ -198,41 +198,41 @@ public class RunBlazeFace : MonoBehaviour
198
  {
199
  float[] offsets = GetGridBoxCoords();
200
 
201
- _model = ModelLoader.Load(Application.streamingAssetsPath + "/blazeface.sentis");
202
 
203
  //We need to add extra layers to the model in order to aggregate the box predictions:
204
 
205
- _size = _model.inputs[0].shape.ToTensorShape()[1]; // Input tensor width
206
 
207
- _model.AddConstant(new Lays.Constant("zero", new int[] { 0 }));
208
- _model.AddConstant(new Lays.Constant("two", new int[] { 2 }));
209
- _model.AddConstant(new Lays.Constant("four", new int[] { 4 }));
210
 
211
- _model.AddLayer(new Lays.Slice("boxes1", "regressors", "zero", "four", "two"));
212
 
213
- _model.AddLayer(new Lays.Transpose("scores", "classificators", new int[] { 0, 2, 1 }));
214
 
215
- _model.AddConstant(new Lays.Constant("eighth", new float[] { 1 / 8f }));
216
- _model.AddConstant(new Lays.Constant("offsets",
217
  new TensorFloat(new TensorShape(1, 896, 4), offsets)
218
  ));
219
- _model.AddLayer(new Lays.Mul("boxes1scaled", "boxes1", "eighth"));
220
- _model.AddLayer(new Lays.Add("boxCoords", "boxes1scaled", "offsets"));
221
- _model.AddOutput("boxCoords");
222
 
223
- _model.AddConstant(new Lays.Constant("maxOutputBoxes", new int[] { _maxOutputBoxes }));
224
- _model.AddConstant(new Lays.Constant("iouThreshold", new float[] { _iouThreshold }));
225
- _model.AddConstant(new Lays.Constant("scoreThreshold", new float[] { _scoreThreshold }));
226
 
227
- _model.AddLayer(new Lays.NonMaxSuppression("NMS", "boxCoords", "scores",
228
  "maxOutputBoxes", "iouThreshold", "scoreThreshold",
229
  centerPointBox: Lays.CenterPointBox.Center
230
  ));
231
- _model.AddOutput("NMS");
232
  }
233
  public void SetupEngine()
234
  {
235
- _worker = WorkerFactory.CreateWorker(backend, _model);
236
  ops = WorkerFactory.CreateOps(backend, allocator);
237
  }
238
 
@@ -269,19 +269,19 @@ public class RunBlazeFace : MonoBehaviour
269
  void ExecuteML(Texture source)
270
  {
271
  var transform = new TextureTransform();
272
- transform.SetDimensions(_size, _size, 3);
273
  transform.SetTensorLayout(0, 3, 1, 2);
274
  using var image0 = TextureConverter.ToTensor(source, transform);
275
 
276
  // Pre-process the image to make input in range (-1..1)
277
  using var image = ops.Mad(image0, 2f, -1f);
278
 
279
- _worker.Execute(image);
280
 
281
- using var boxCoords = _worker.PeekOutput("boxCoords") as TensorFloat; //face coords
282
- using var regressors = _worker.PeekOutput("regressors") as TensorFloat; //contains markers
283
 
284
- var NM1 = _worker.PeekOutput("NMS") as TensorInt;
285
 
286
  using var boxCoords2 = boxCoords.ShallowReshape(boxCoords.shape.Unsqueeze(0)) as TensorFloat;
287
  using var output = ops.GatherND(boxCoords2, NM1, 0);
@@ -294,7 +294,7 @@ public class RunBlazeFace : MonoBehaviour
294
 
295
  ClearAnnotations();
296
 
297
- Vector2 markerScale = _previewUI.rectTransform.rect.size/ 16;
298
 
299
  DrawFaces(output, markersOutput, output.shape[0], markerScale);
300
 
@@ -305,7 +305,7 @@ public class RunBlazeFace : MonoBehaviour
305
  // Face detection
306
  ExecuteML(input);
307
 
308
- _previewUI.texture = input;
309
  }
310
 
311
  public void DrawBox(BoundingBox box, Sprite sprite)
@@ -313,7 +313,7 @@ public class RunBlazeFace : MonoBehaviour
313
  var panel = new GameObject("ObjectBox");
314
  panel.AddComponent<CanvasRenderer>();
315
  panel.AddComponent<Image>();
316
- panel.transform.SetParent(_previewUI.transform, false);
317
 
318
  var img = panel.GetComponent<Image>();
319
  img.color = Color.white;
@@ -326,7 +326,7 @@ public class RunBlazeFace : MonoBehaviour
326
  }
327
  public void ClearAnnotations()
328
  {
329
- foreach (Transform child in _previewUI.transform)
330
  {
331
  Destroy(child.gameObject);
332
  }
@@ -337,12 +337,12 @@ public class RunBlazeFace : MonoBehaviour
337
  closing = true;
338
  ops?.Dispose();
339
  allocator?.Dispose();
340
- if (_webcam) Destroy(_webcam);
341
- if (_video) Destroy(_video);
342
  RenderTexture.active = null;
343
- _targetTexture.Release();
344
- _worker?.Dispose();
345
- _worker = null;
346
  }
347
 
348
  void OnDestroy()
 
13
  * Put this script on the Main Camera
14
  * Put blazeface.sentis in the Assets/StreamingAssets folder
15
  * Create a RawImage of size 320x320 in the scene
16
+ * Put a link to that image in previewUI
17
  * Put a video in Assets/StreamingAssets folder and put the name of it in videoName
18
+ * Or put a test image in inputImage
19
+ * Set inputType to appropriate input
20
  */
21
 
22
 
23
  public class RunBlazeFace : MonoBehaviour
24
  {
25
  //Drag a link to a raw image here:
26
+ public RawImage previewUI = null;
27
 
28
  // Put your bounding box sprite image here
29
  public Sprite faceTexture;
 
34
  public string videoName = "chatting.mp4";
35
 
36
  //
37
+ public Texture2D inputImage;
38
 
39
+ public InputType inputType = InputType.Video;
40
 
41
+ Vector2Int resolution = new Vector2Int(640, 640);
42
+ WebCamTexture webcam;
43
+ VideoPlayer video;
44
 
45
  const BackendType backend = BackendType.GPUCompute;
46
 
47
+ RenderTexture targetTexture;
48
  public enum InputType { Image, Video, Webcam };
49
 
50
 
51
  //Some adjustable parameters for the model
52
+ [SerializeField, Range(0, 1)] float iouThreshold = 0.5f;
53
+ [SerializeField, Range(0, 1)] float scoreThreshold = 0.5f;
54
+ int maxOutputBoxes = 64;
55
 
56
+ IWorker worker;
57
 
58
  //Holds image size
59
+ int size;
60
 
61
  Ops ops;
62
  ITensorAllocator allocator;
63
 
64
+ Model model;
65
 
66
  //webcam device name:
67
+ const string deviceName = "";
68
 
69
  bool closing = false;
70
 
 
82
 
83
  //(Note: if using a webcam on mobile get permissions here first)
84
 
85
+ targetTexture = new RenderTexture(resolution.x, resolution.y, 0);
86
 
87
  SetupInput();
88
 
 
93
 
94
  void SetupInput()
95
  {
96
+ switch (inputType)
97
  {
98
  case InputType.Webcam:
99
  {
100
+ webcam = new WebCamTexture(deviceName, resolution.x, resolution.y);
101
+ webcam.requestedFPS = 30;
102
+ webcam.Play();
103
  break;
104
  }
105
  case InputType.Video:
106
  {
107
+ video = gameObject.AddComponent<VideoPlayer>();//new VideoPlayer();
108
+ video.renderMode = VideoRenderMode.APIOnly;
109
+ video.source = VideoSource.Url;
110
+ video.url = Application.streamingAssetsPath + "/"+videoName;
111
+ video.isLooping = true;
112
+ video.Play();
113
  break;
114
  }
115
  default:
116
  {
117
+ Graphics.Blit(inputImage, targetTexture);
118
  }
119
  break;
120
  }
 
122
 
123
  void Update()
124
  {
125
+ if (inputType == InputType.Webcam)
126
  {
127
  // Format video input
128
+ if (!webcam.didUpdateThisFrame) return;
129
 
130
+ var aspect1 = (float)webcam.width / webcam.height;
131
+ var aspect2 = (float)resolution.x / resolution.y;
132
  var gap = aspect2 / aspect1;
133
 
134
+ var vflip = webcam.videoVerticallyMirrored;
135
  var scale = new Vector2(gap, vflip ? -1 : 1);
136
  var offset = new Vector2((1 - gap) / 2, vflip ? 1 : 0);
137
 
138
+ Graphics.Blit(webcam, targetTexture, scale, offset);
139
  }
140
+ if (inputType == InputType.Video)
141
  {
142
+ var aspect1 = (float)video.width / video.height;
143
+ var aspect2 = (float)resolution.x / resolution.y;
144
  var gap = aspect2 / aspect1;
145
 
146
  var vflip = false;
147
  var scale = new Vector2(gap, vflip ? -1 : 1);
148
  var offset = new Vector2((1 - gap) / 2, vflip ? 1 : 0);
149
+ Graphics.Blit(video.texture, targetTexture, scale, offset);
150
  }
151
+ if (inputType == InputType.Image)
152
  {
153
+ Graphics.Blit(inputImage, targetTexture);
154
  }
155
 
156
  if (Input.GetKeyDown(KeyCode.Escape))
 
161
 
162
  if (Input.GetKeyDown(KeyCode.P))
163
  {
164
+ previewUI.enabled = !previewUI.enabled;
165
  }
166
  }
167
 
 
170
  {
171
  if (!closing)
172
  {
173
+ RunInference(targetTexture);
174
  }
175
  }
176
 
 
198
  {
199
  float[] offsets = GetGridBoxCoords();
200
 
201
+ model = ModelLoader.Load(Application.streamingAssetsPath + "/blazeface.sentis");
202
 
203
  //We need to add extra layers to the model in order to aggregate the box predictions:
204
 
205
+ size = model.inputs[0].shape.ToTensorShape()[1]; // Input tensor width
206
 
207
+ model.AddConstant(new Lays.Constant("zero", new int[] { 0 }));
208
+ model.AddConstant(new Lays.Constant("two", new int[] { 2 }));
209
+ model.AddConstant(new Lays.Constant("four", new int[] { 4 }));
210
 
211
+ model.AddLayer(new Lays.Slice("boxes1", "regressors", "zero", "four", "two"));
212
 
213
+ model.AddLayer(new Lays.Transpose("scores", "classificators", new int[] { 0, 2, 1 }));
214
 
215
+ model.AddConstant(new Lays.Constant("eighth", new float[] { 1 / 8f }));
216
+ model.AddConstant(new Lays.Constant("offsets",
217
  new TensorFloat(new TensorShape(1, 896, 4), offsets)
218
  ));
219
+ model.AddLayer(new Lays.Mul("boxes1scaled", "boxes1", "eighth"));
220
+ model.AddLayer(new Lays.Add("boxCoords", "boxes1scaled", "offsets"));
221
+ model.AddOutput("boxCoords");
222
 
223
+ model.AddConstant(new Lays.Constant("maxOutputBoxes", new int[] { maxOutputBoxes }));
224
+ model.AddConstant(new Lays.Constant("iouThreshold", new float[] { iouThreshold }));
225
+ model.AddConstant(new Lays.Constant("scoreThreshold", new float[] { scoreThreshold }));
226
 
227
+ model.AddLayer(new Lays.NonMaxSuppression("NMS", "boxCoords", "scores",
228
  "maxOutputBoxes", "iouThreshold", "scoreThreshold",
229
  centerPointBox: Lays.CenterPointBox.Center
230
  ));
231
+ model.AddOutput("NMS");
232
  }
233
  public void SetupEngine()
234
  {
235
+ worker = WorkerFactory.CreateWorker(backend, model);
236
  ops = WorkerFactory.CreateOps(backend, allocator);
237
  }
238
 
 
269
  void ExecuteML(Texture source)
270
  {
271
  var transform = new TextureTransform();
272
+ transform.SetDimensions(size, size, 3);
273
  transform.SetTensorLayout(0, 3, 1, 2);
274
  using var image0 = TextureConverter.ToTensor(source, transform);
275
 
276
  // Pre-process the image to make input in range (-1..1)
277
  using var image = ops.Mad(image0, 2f, -1f);
278
 
279
+ worker.Execute(image);
280
 
281
+ using var boxCoords = worker.PeekOutput("boxCoords") as TensorFloat; //face coords
282
+ using var regressors = worker.PeekOutput("regressors") as TensorFloat; //contains markers
283
 
284
+ var NM1 = worker.PeekOutput("NMS") as TensorInt;
285
 
286
  using var boxCoords2 = boxCoords.ShallowReshape(boxCoords.shape.Unsqueeze(0)) as TensorFloat;
287
  using var output = ops.GatherND(boxCoords2, NM1, 0);
 
294
 
295
  ClearAnnotations();
296
 
297
+ Vector2 markerScale = previewUI.rectTransform.rect.size/ 16;
298
 
299
  DrawFaces(output, markersOutput, output.shape[0], markerScale);
300
 
 
305
  // Face detection
306
  ExecuteML(input);
307
 
308
+ previewUI.texture = input;
309
  }
310
 
311
  public void DrawBox(BoundingBox box, Sprite sprite)
 
313
  var panel = new GameObject("ObjectBox");
314
  panel.AddComponent<CanvasRenderer>();
315
  panel.AddComponent<Image>();
316
+ panel.transform.SetParent(previewUI.transform, false);
317
 
318
  var img = panel.GetComponent<Image>();
319
  img.color = Color.white;
 
326
  }
327
  public void ClearAnnotations()
328
  {
329
+ foreach (Transform child in previewUI.transform)
330
  {
331
  Destroy(child.gameObject);
332
  }
 
337
  closing = true;
338
  ops?.Dispose();
339
  allocator?.Dispose();
340
+ if (webcam) Destroy(webcam);
341
+ if (video) Destroy(video);
342
  RenderTexture.active = null;
343
+ targetTexture.Release();
344
+ worker?.Dispose();
345
+ worker = null;
346
  }
347
 
348
  void OnDestroy()