Aye10032 committed on
Commit 7c54d17 · 1 Parent(s): 9bf4c3e
Files changed (2)
  1. README.md +14 -1
  2. top5_error_rate.py +8 -3
README.md CHANGED
@@ -7,6 +7,19 @@ sdk: gradio
  sdk_version: 3.19.1
  app_file: app.py
  pinned: false
+ tags:
+ - evaluate
+ - metric
  ---
  
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # Metric Card for Top-5 error rate
+
+ ## Metric Description
+
+ The "top-5 error" is the percentage of times that the target label does not appear among the 5 highest-probability predictions. It can be computed with:
+ Top-5 Error Rate = 1 - Top-5 Accuracy
+ or equivalently:
+ Top-5 Error Rate = (Number of incorrect top-5 predictions) / (Total number of cases processed)
+ Where:
+ - Top-5 Accuracy: The proportion of cases where the true label is among the model's top 5 predicted classes.
+ - Incorrect top-5 prediction: The true label is not in the top 5 predicted classes (ranked by probability).
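The formula in the new metric card can be sanity-checked with a minimal Python sketch. This is written purely for illustration (it is not code from the Space) and assumes each prediction is already the list of the 5 highest-probability class indices:

```python
def top5_error_rate(predictions, references):
    """Fraction of samples whose true label is missing from the top-5 predictions.

    predictions: list of lists, each inner list holding the 5 class indices
                 with the highest predicted probability for one sample.
    references:  list of ints, the true class index for each sample.
    """
    total = len(references)
    # A sample counts as correct when its true label appears in its top-5 list.
    correct = sum(1 for top5, ref in zip(predictions, references) if ref in top5)
    top5_accuracy = correct / total
    return 1.0 - top5_accuracy  # Top-5 Error Rate = 1 - Top-5 Accuracy


# 1 of 3 samples misses its label in the top 5, so the error rate is ~0.33.
print(top5_error_rate([[1, 2, 3, 4, 5], [0, 9, 8, 7, 6], [3, 4, 5, 6, 7]], [2, 1, 6]))
```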
top5_error_rate.py CHANGED
@@ -2,7 +2,6 @@ from typing import Dict, Any
  
  import datasets
  import evaluate
- import numpy as np
  from evaluate.utils.file_utils import add_start_docstrings
  
  _DESCRIPTION = """
@@ -43,9 +42,16 @@ class Top5ErrorRate(evaluate.Metric):
      inputs_description=_KWARGS_DESCRIPTION,
      features=datasets.Features(
          {
-             "predictions": datasets.Sequence(datasets.Value("int32")),
+             "predictions": datasets.Sequence(
+                 datasets.Sequence(datasets.Value("int32"))
+             ),
              "references": datasets.Sequence(datasets.Value("int32")),
          }
+         if self.config_name == "multilabel"
+         else {
+             "predictions": datasets.Sequence(datasets.Value("int32")),
+             "references": datasets.Value("int32"),
+         }
      ),
      reference_urls=[],
  )
@@ -57,7 +63,6 @@ class Top5ErrorRate(evaluate.Metric):
          references: list[int] = None,
          **kwargs,
      ) -> Dict[str, Any]:
- 
          total = len(references)
          correct = sum(1 for pred, ref in zip(predictions, references) if ref in pred)
  
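Taken together, the new feature schema and the unchanged `_compute` logic suggest a call pattern like the sketch below. The local load path and the example values are assumptions for illustration; the diff does not show the Space's published hub name or the key returned by `compute`:

```python
import evaluate

# Load the metric script from a local checkout of this Space; the published
# hub path is not shown in the diff, so a local directory is assumed here.
top5_error_rate = evaluate.load("./top5_error_rate")

# Default config after this commit: each prediction is a sequence of the
# top-5 class indices, each reference is a single int32 label.
results = top5_error_rate.compute(
    predictions=[[1, 2, 3, 4, 5], [0, 9, 8, 7, 6]],
    references=[2, 1],
)
print(results)

# The "multilabel" config instead types each prediction as a sequence of
# int sequences and each reference as a sequence of int32 values.
```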