Victoria Oberascher committed on
Commit
0c1d03a
·
1 Parent(s): 870b63f

add fiftyone example in readme

Browse files
Files changed (1) hide show
  1. README.md +31 -10
README.md CHANGED
@@ -34,25 +34,46 @@ To get started with horizon-metrics, make sure you have the necessary dependenci
34
  This is how you can quickly evaluate your horizon prediction models using SEA-AI/horizon-metrics:
35
 
36
  ```python
 
37
  import evaluate
38
 
 
39
  ground_truth_points = [[[0.0, 0.5384765625], [1.0, 0.4931640625]],
40
- [[0.0, 0.53796875], [1.0, 0.4928515625]],
41
- [[0.0, 0.5374609375], [1.0, 0.4925390625]],
42
- [[0.0, 0.536953125], [1.0, 0.4922265625]],
43
- [[0.0, 0.5364453125], [1.0, 0.4919140625]]]
44
 
45
  prediction_points = [[[0.0, 0.5428930956049597], [1.0, 0.4642497615378973]],
46
- [[0.0, 0.5428930956049597], [1.0, 0.4642497615378973]],
47
- [[0.0, 0.523573113510805], [1.0, 0.47642688648919496]],
48
- [[0.0, 0.5200016849393765], [1.0, 0.4728554579177664]],
49
- [[0.0, 0.523573113510805], [1.0, 0.47642688648919496]]]
50
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
51
 
52
  module = evaluate.load("SEA-AI/horizon-metrics")
53
  module.add(predictions=prediction_points, references=ground_truth_points)
54
  module.compute()
55
-
56
  ```
57
 
58
  This will output the evaluation metrics for your horizon prediction model:
 
34
  This is how you can quickly evaluate your horizon prediction models using SEA-AI/horizon-metrics:
35
 
36
  ```python
37
+
38
  import evaluate
+ import fiftyone as fo
+ from fiftyone import ViewField as F
39
 
40
+ # Use artificial data for testing or
41
  ground_truth_points = [[[0.0, 0.5384765625], [1.0, 0.4931640625]],
42
+ [[0.0, 0.53796875], [1.0, 0.4928515625]],
43
+ [[0.0, 0.5374609375], [1.0, 0.4925390625]],
44
+ [[0.0, 0.536953125], [1.0, 0.4922265625]],
45
+ [[0.0, 0.5364453125], [1.0, 0.4919140625]]]
46
 
47
  prediction_points = [[[0.0, 0.5428930956049597], [1.0, 0.4642497615378973]],
48
+ [[0.0, 0.5428930956049597], [1.0, 0.4642497615378973]],
49
+ [[0.0, 0.523573113510805], [1.0, 0.47642688648919496]],
50
+ [[0.0, 0.5200016849393765], [1.0, 0.4728554579177664]],
51
+ [[0.0, 0.523573113510805], [1.0, 0.47642688648919496]]]
52
+
53
+ # Load data from fiftyone
54
+ sequence = "Sentry_2023_02_Portugal_2023_01_24_19_15_17"
55
+ dataset_name = "SENTRY_VIDEOS_DATASET_QA"
56
+ sequence_view = fo.load_dataset(dataset_name).match(F("sequence") == sequence)
57
+ sequence_view = sequence_view.select_group_slices("thermal_wide")
58
+
59
+ # Get the ground truth points
60
+ polylines_gt = sequence_view.values("frames.ground_truth_pl")
61
+ ground_truth_points = [
62
+ line["polylines"][0]["points"][0] for line in polylines_gt[0]
63
+ if line is not None
64
+ ]
65
+
66
+ # Get the predicted points
67
+ polylines_pred = sequence_view.values(
68
+ "frames.ahoy-IR-b2-whales__XAVIER-AGX-JP46_pl")
69
+ prediction_points = [
70
+ line["polylines"][0]["points"][0] for line in polylines_pred[0]
71
+ if line is not None
72
+ ]
73
 
74
  module = evaluate.load("SEA-AI/horizon-metrics")
75
  module.add(predictions=prediction_points, references=ground_truth_points)
76
  module.compute()
 
77
  ```
78
 
79
  This will output the evaluation metrics for your horizon prediction model: