from typing import Dict, Any
import datasets
import evaluate
from evaluate.utils.file_utils import add_start_docstrings
_DESCRIPTION = """
The "top-5 error" is the percentage of times that the target label does not appear among the 5 highest-probability predictions. It can be computed with:
Top-5 Error Rate = 1 - Top-5 Accuracy
or equivalently:
Top-5 Error Rate = (Number of incorrect top-5 predictions) / (Total number of cases processed)
Where:
- Top-5 Accuracy: The proportion of cases where the true label is among the model's top 5 predicted classes.
- Incorrect top-5 prediction: The true label is not in the top 5 predicted classes (ranked by probability).
"""
_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `list` of `int`): For each example, the top 5 predicted labels, ranked by probability.
    references (`list` of `int`): Ground truth labels.
Returns:
    top5_error_rate (`float`): Top-5 error rate. Minimum possible value is 0.0 (the true label is always among the top 5 predictions). Maximum possible value is 1.0 (it never is).
Examples:
    >>> top5_error_rate_metric = evaluate.load("top5_error_rate")
    >>> results = top5_error_rate_metric.compute(
    ...     references=[0, 1, 2, 3],
    ...     predictions=[[0, 2, 3, 4, 5], [2, 3, 4, 5, 6], [2, 0, 1, 3, 4], [3, 1, 2, 4, 5]],
    ... )
    >>> print(results)
    {'top5_error_rate': 0.25}
"""
_CITATION = """
"""
@add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Top5ErrorRate(evaluate.Metric):
def _info(self):
return evaluate.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # Multilabel config: each example carries a list of
                    # reference labels, so predictions are lists of label lists.
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("int32"))),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    # Default config: one list of top-5 predicted labels and a
                    # single ground-truth label per example.
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Value("int32"),
                }
            ),
reference_urls=[],
)
    def _compute(
        self,
        *,
        predictions: list[list[int]] = None,
        references: list[int] = None,
        **kwargs,
    ) -> Dict[str, Any]:
        total = len(references)
        # An example counts as a top-5 hit when its ground-truth label
        # appears anywhere in its list of top-5 predicted labels.
        correct = sum(1 for pred, ref in zip(predictions, references) if ref in pred)
        error_rate = 1.0 - (correct / total)
        return {"top5_error_rate": float(error_rate)}
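if __name__ == "__main__":
    # Minimal sanity-check sketch, run as a plain script (illustrative only;
    # in normal use the module is loaded through evaluate.load). This assumes
    # the metric class can be instantiated directly, which evaluate.Metric
    # supports.
    metric = Top5ErrorRate()
    results = metric.compute(
        references=[0, 1, 2, 3],
        predictions=[
            [0, 2, 3, 4, 5],  # reference 0 present -> top-5 hit
            [2, 3, 4, 5, 6],  # reference 1 missing -> top-5 miss
            [2, 0, 1, 3, 4],  # reference 2 present -> top-5 hit
            [3, 1, 2, 4, 5],  # reference 3 present -> top-5 hit
        ],
    )
    print(results)  # expected: {'top5_error_rate': 0.25}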